filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
controllers/os-coreos-alicloud/cmd/gardener-extension-os-coreos-alicloud/app/app.go
|
// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"github.com/gardener/gardener-extensions/controllers/os-coreos-alicloud/pkg/coreos-alicloud"
"github.com/gardener/gardener-extensions/pkg/controller"
controllercmd "github.com/gardener/gardener-extensions/pkg/controller/cmd"
"github.com/spf13/cobra"
"os"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// Name is the name of the CoreOS Alicloud controller.
const Name = "os-coreos-alicloud"
// NewControllerCommand creates a new command for running a CoreOS Alicloud controller.
func NewControllerCommand(ctx context.Context) *cobra.Command {
var (
restOpts = &controllercmd.RESTOptions{}
mgrOpts = &controllercmd.ManagerOptions{
LeaderElection: true,
LeaderElectionID: controllercmd.LeaderElectionNameID(Name),
LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
}
ctrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
aggOption = controllercmd.NewOptionAggregator(restOpts, mgrOpts, ctrlOpts)
)
cmd := &cobra.Command{
Use: "os-coreos-alicloud-controller-manager",
Run: func(cmd *cobra.Command, args []string) {
if err := aggOption.Complete(); err != nil {
controllercmd.LogErrAndExit(err, "Error completing options")
}
mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
if err != nil {
controllercmd.LogErrAndExit(err, "Could not instantiate manager")
}
if err := controller.AddToScheme(mgr.GetScheme()); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
ctrlOpts.Completed().Apply(&coreos.DefaultAddOptions.Controller)
if err := coreos.AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add controller to manager")
}
if err := mgr.Start(ctx.Done()); err != nil {
controllercmd.LogErrAndExit(err, "Error running manager")
}
},
}
aggOption.AddFlags(cmd.Flags())
return cmd
}
|
[
"\"LEADER_ELECTION_NAMESPACE\""
] |
[] |
[
"LEADER_ELECTION_NAMESPACE"
] |
[]
|
["LEADER_ELECTION_NAMESPACE"]
|
go
| 1 | 0 | |
mmhuman3d/core/visualization/renderer/torch3d_renderer/silhouette_renderer.py
|
from typing import Iterable, List, Optional, Tuple, Union
import torch
from pytorch3d.structures import Meshes
from .base_renderer import MeshBaseRenderer
from .builder import RENDERER
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
@RENDERER.register_module(name=[
'silhouette', 'silhouette_renderer', 'Silhouette', 'SilhouetteRenderer'
])
class SilhouetteRenderer(MeshBaseRenderer):
"""Silhouette renderer."""
def __init__(
self,
resolution: Tuple[int, int],
device: Union[torch.device, str] = 'cpu',
output_path: Optional[str] = None,
return_type: Optional[List] = None,
out_img_format: str = '%06d.png',
projection: Literal['weakperspective', 'fovperspective',
'orthographics', 'perspective',
'fovorthographics'] = 'weakperspective',
in_ndc: bool = True,
**kwargs,
) -> None:
"""SilhouetteRenderer for neural rendering and visualization.
Args:
resolution (Tuple[int, int]):
(width, height) of the rendered images.
device (Union[torch.device, str], optional):
You can pass a str or torch.device for cpu or gpu render.
Defaults to 'cpu'.
output_path (Optional[str], optional):
Output path of the video or images to be saved.
Defaults to None.
return_type (Optional[List], optional): the types of tensor to be
returned. 'tensor' denotes returning the computed tensor, e.g.
the silhouette tensor of shape (B, H, W) for SilhouetteRenderer.
'rgba' denotes the colorful RGBA tensor to be written.
Returns a 3-channel mask for 'tensor' and a 4-channel tensor for
'rgba'.
Defaults to None.
out_img_format (str, optional): The image format string for
saving the images.
Defaults to '%06d.png'.
projection (str, optional):
Projection type of camera.
Defaults to 'weakperspective'.
in_ndc (bool, optional): Whether defined in NDC.
Defaults to True.
Returns:
None
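Example:
A minimal, illustrative sketch (verts/faces and the camera matrices
K, R, T are placeholder names, not part of the original docs):
>>> renderer = SilhouetteRenderer(
...     resolution=(512, 512), device='cuda', return_type=['tensor'])
>>> results = renderer(vertices=verts, faces=faces, K=K, R=R, T=T)
>>> silhouette = results['tensor']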
"""
super().__init__(
resolution=resolution,
device=device,
output_path=output_path,
obj_path=None,
return_type=return_type,
out_img_format=out_img_format,
projection=projection,
in_ndc=in_ndc,
**kwargs)
def set_render_params(self, **kwargs):
super().set_render_params(**kwargs)
self.shader_type = 'silhouette'
def forward(self,
meshes: Optional[Meshes] = None,
vertices: Optional[torch.Tensor] = None,
faces: Optional[torch.Tensor] = None,
K: Optional[torch.Tensor] = None,
R: Optional[torch.Tensor] = None,
T: Optional[torch.Tensor] = None,
images: Optional[torch.Tensor] = None,
indexes: Iterable[str] = None,
**kwargs):
"""The params are the same as MeshBaseRenderer."""
meshes = self.prepare_meshes(meshes, vertices, faces)
cameras = self.init_cameras(K=K, R=R, T=T)
renderer = self.init_renderer(cameras, None)
rendered_images = renderer(meshes)
silhouette_map = rendered_images[..., 3:]
valid_masks = (silhouette_map > 0) * 1.0
if self.output_path is not None or 'rgba' in self.return_type:
rgbs = silhouette_map.repeat(1, 1, 1, 3)
if self.output_path is not None:
self.write_images(rgbs, valid_masks, images, indexes)
results = {}
if 'tensor' in self.return_type:
results.update(tensor=silhouette_map)
if 'rgba' in self.return_type:
results.update(rgba=valid_masks.repeat(1, 1, 1, 4))
return results
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
bin/generate-database-summary.py
|
import sys
import os
import json
from boto3.s3.transfer import S3Transfer
import boto3
from dateutil.parser import parse
from datetime import datetime
from time import gmtime, strftime
from neo4j import GraphDatabase
# USAGE: python bin/generate-database-summary.py
#
# If files are being pushed to S3, the aws cli will need to be configured:
# https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html
#
# Environment Variables
#
# REQUIRED: $AGR_VERSION
# $AWS_ACCESS_KEY_ID (OPTIONAL if AGR_ENV is unset or "local")
# $AWS_SECRET_ACCESS_KEY (OPTIONAL if AGR_ENV is unset or "local")
#
# OPTIONAL: $AGR_ENV (build, test, stage, prod)
# If not set, will default to creating a local file
# $AGR_DB_URI bolt url
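#
# Example invocation (illustrative values, not from the original docs):
#   export AGR_VERSION=4.0.0
#   export AGR_ENV=local
#   python bin/generate-database-summary.py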
bucket = "agr-db-reports"
bucketFolder = "qc-database-summary"
uri = "bolt://localhost:7687"
if "AGR_DB_URI" in os.environ:
uri = os.environ["AGR_DB_URI"]
if "AGR_VERSION" in os.environ:
agrVersion = os.environ['AGR_VERSION']
else:
print("Environment variable not set: AGR_VERSION")
exit(1)
if "AGR_ENV" in os.environ:
agrEnvironment = os.environ['AGR_ENV']
print("Using environment: ", agrEnvironment)
else:
agrEnvironment = None
datetimeNow = strftime("%Y-%m-%d_%H_%M_%S", gmtime())
if agrEnvironment:
filename = "alliance-db-summary-" + agrEnvironment + "-" + agrVersion + "-" + datetimeNow + ".json"
driver = GraphDatabase.driver(uri)
with driver.session() as session:
summary = {}
entities = {}
with session.begin_transaction() as tx:
for record in tx.run("""
MATCH (entity)
WITH labels(entity) AS entityTypes
RETURN count(entityTypes) AS frequency,
entityTypes"""):
frequency = record["frequency"]
entityTypes = record["entityTypes"]
if (len(entityTypes) == 1 and entityTypes[0] != "Load"):
entities[entityTypes[0]] = frequency
elif len(entityTypes) == 2:
if entityTypes[1] in entities:
entities[entityTypes[1]][entityTypes[0]] = frequency
else:
entities[entityTypes[1]] = {entityTypes[0]: frequency}
if entityTypes[0] in entities:
entities[entityTypes[0]][entityTypes[1]] = frequency
else:
entities[entityTypes[0]] = {entityTypes[1]: frequency}
entityKeys = list(entities.keys()).copy()
for key in entityKeys:
if not isinstance(entities[key], int):
if len(entities[key].keys()) == 1:
subKey = list(entities[key].keys())[0]
if subKey in entities:
del entities[key]
summary = { "overview": entities }
if agrEnvironment:
filePath = "reports/" + filename
with open(filePath, 'w') as f:
print("Writing summary to file: ", filePath)
f.write(json.dumps(summary, indent=4, sort_keys=True))
if agrEnvironment != "local":
print("Uploading to S3 bucket: ", bucketFolder)
if "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ:
client = boto3.client('s3', aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"], aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"])
transfer = S3Transfer(client)
transfer.upload_file(filePath, bucket, bucketFolder + "/" + filename)
else:
print("ERROR: access keys AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY needs to be set")
exit(1)
else:
print(json.dumps(summary, indent=4, sort_keys=True))
|
[] |
[] |
[
"AWS_SECRET_ACCESS_KEY",
"AGR_DB_URI",
"AGR_VERSION",
"AGR_ENV",
"AWS_ACCESS_KEY_ID"
] |
[]
|
["AWS_SECRET_ACCESS_KEY", "AGR_DB_URI", "AGR_VERSION", "AGR_ENV", "AWS_ACCESS_KEY_ID"]
|
python
| 5 | 0 | |
tests/framework/common.go
|
package framework
import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"math"
"math/big"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/docker/docker/client"
"github.com/fatih/color"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
certman "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
"github.com/pkg/errors"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
helmcli "helm.sh/helm/v3/pkg/cli"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
"github.com/openservicemesh/osm/pkg/cli"
"github.com/openservicemesh/osm/pkg/constants"
"github.com/openservicemesh/osm/pkg/utils"
)
// Td is the global context for tests.
var Td OsmTestData
// Since parseFlags is global, this is the Ginkgo way to do it.
// "init" is usually called by the go test runtime
// https://github.com/onsi/ginkgo/issues/265
func init() {
registerFlags(&Td)
}
// Initialize test data before each test
var _ = BeforeEach(func() {
Expect(Td.InitTestData(GinkgoT())).To(BeNil())
})
// Cleanup after each test
var _ = AfterEach(func() {
Td.Cleanup(Test)
})
var _ = AfterSuite(func() {
Td.Cleanup(Suite)
})
const (
// constant, default name for the Registry Secret
registrySecretName = "acr-creds"
// test tag prefix, for NS labeling
osmTest = "osmTest"
)
var (
// default name for the mesh
defaultOsmNamespace = "osm-system"
// default image tag
defaultImageTag = "latest"
// default cert manager
defaultCertManager = "tresor"
// default enable NS metrics tag
defaultEnableNsMetricTag = true
// default enable debug server
defaultEnableDebugServer = true
// default deploy Prometheus
defaultDeployPrometheus = false
// default deploy Grafana
defaultDeployGrafana = false
// default deploy Jaeger
defaultDeployJaeger = false
// default deploy Fluentbit
defaultDeployFluentbit = false
// default envoy loglevel
defaultEnvoyLogLevel = "debug"
)
// OSMDescribeInfo is a struct to represent the Tier and Bucket of a given e2e test
type OSMDescribeInfo struct {
// Tier represents the priority of the test. Lower value indicates higher priority.
Tier int
// Bucket indicates in which test Bucket the test will run in for CI. Each
// Bucket is run in parallel while tests in the same Bucket run sequentially.
Bucket int
}
func (o OSMDescribeInfo) String() string {
return fmt.Sprintf("[Tier %d][Bucket %d]", o.Tier, o.Bucket)
}
// OSMDescribe describes an e2e test, prefixing its name with the test's OSMDescribeInfo
func OSMDescribe(name string, opts OSMDescribeInfo, body func()) bool {
return Describe(opts.String()+" "+name, body)
}
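// Example (illustrative only; the test name and spec body are hypothetical):
//
//	var _ = OSMDescribe("Tests traffic routing", OSMDescribeInfo{Tier: 1, Bucket: 2}, func() {
//		// Ginkgo specs (Context/It blocks) go here
//	})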
// InstallType defines several OSM test deployment scenarios
type InstallType string
const (
// SelfInstall uses current kube cluster, installs OSM using CLI
SelfInstall InstallType = "SelfInstall"
// KindCluster Creates Kind cluster on docker and uses it as cluster, OSM installs through CLI
KindCluster InstallType = "KindCluster"
// NoInstall uses current kube cluster, assumes an OSM is present in `OsmNamespace`
NoInstall InstallType = "NoInstall"
)
// Verifies the instType string flag option is a valid enum type
func verifyValidInstallType(t InstallType) error {
switch t {
case SelfInstall, KindCluster, NoInstall:
return nil
default:
return errors.Errorf("%s is not a valid OSM install type", string(t))
}
}
// OsmTestData stores common state, variables and flags for the test at hand
type OsmTestData struct {
T GinkgoTInterface // for common test logging
TestID uint64 // uint randomized for every test. GinkgoRandomSeed can't be used as is per-suite.
TestFolderName string // Test folder name, when overridden by test flags
CleanupTest bool // Cleanup test-related resources once finished
WaitForCleanup bool // Forces test to wait for effective deletion of resources upon cleanup
// OSM install-time variables
InstType InstallType // Install type.
OsmNamespace string
OsmImageTag string
EnableNsMetricTag bool
// Container registry related vars
CtrRegistryUser string // registry login
CtrRegistryPassword string // registry password, if any
CtrRegistryServer string // server name. Has to be network reachable
// Kind cluster related vars
ClusterName string // Kind cluster name (used if kindCluster)
CleanupKindClusterBetweenTests bool // Clean and re-create kind cluster between tests
CleanupKindCluster bool // Cleanup kind cluster upon test finish
// Cluster handles and rest config
Env *cli.EnvSettings
RestConfig *rest.Config
Client *kubernetes.Clientset
SmiClients *smiClients
ClusterProvider *cluster.Provider // provider, used when kindCluster is used
}
// Function to run at init before Ginkgo has called parseFlags
// See suite_test.go for details on how Ginkgo calls parseFlags
func registerFlags(td *OsmTestData) {
flag.BoolVar(&td.CleanupTest, "cleanupTest", true, "Cleanup test resources when done")
flag.BoolVar(&td.WaitForCleanup, "waitForCleanup", true, "Wait for effective deletion of resources")
flag.StringVar(&td.TestFolderName, "testFolderName", "", "Test folder name")
flag.StringVar((*string)(&td.InstType), "installType", string(SelfInstall), "Type of install/deployment for OSM")
flag.StringVar(&td.ClusterName, "kindClusterName", "osm-e2e", "Name of the Kind cluster to be created")
flag.BoolVar(&td.CleanupKindCluster, "cleanupKindCluster", true, "Cleanup kind cluster upon exit")
flag.BoolVar(&td.CleanupKindClusterBetweenTests, "cleanupKindClusterBetweenTests", false, "Cleanup kind cluster between tests")
flag.StringVar(&td.CtrRegistryServer, "ctrRegistry", os.Getenv("CTR_REGISTRY"), "Container registry")
flag.StringVar(&td.CtrRegistryUser, "ctrRegistryUser", os.Getenv("CTR_REGISTRY_USER"), "Container registry username")
flag.StringVar(&td.CtrRegistryPassword, "ctrRegistrySecret", os.Getenv("CTR_REGISTRY_PASSWORD"), "Container registry secret")
flag.StringVar(&td.OsmImageTag, "osmImageTag", utils.GetEnv("CTR_TAG", defaultImageTag), "OSM image tag")
flag.StringVar(&td.OsmNamespace, "OsmNamespace", utils.GetEnv("K8S_NAMESPACE", defaultOsmNamespace), "OSM Namespace")
flag.BoolVar(&td.EnableNsMetricTag, "EnableMetricsTag", defaultEnableNsMetricTag, "Enable tagging namespaces for metrics collection")
}
// GetTestFile prefixes a filename with the current test folder
// and creates the test folder for the current test if it doesn't exist.
// The test folder is only created if some part of a test calls this function;
// otherwise nothing is created, to avoid extra clutter.
func (td *OsmTestData) GetTestFile(filename string) string {
var testDir string
if len(td.TestFolderName) == 0 {
testDir = fmt.Sprintf("test-%d", td.TestID)
} else {
testDir = td.TestFolderName
}
err := os.Mkdir(testDir, 0750)
exists := false
if err == nil {
td.T.Logf("Created test dir %s", testDir)
exists = true
}
if os.IsExist(err) || exists {
return fmt.Sprintf("./%s/%s", testDir, filename)
}
return ""
}
// GetTestNamespaceSelectorMap returns a string-based selector used to refer/select all namespace
// resources for this test
func (td *OsmTestData) GetTestNamespaceSelectorMap() map[string]string {
return map[string]string{
osmTest: fmt.Sprintf("%d", GinkgoRandomSeed()),
}
}
// AreRegistryCredsPresent checks if Registry Credentials are present
// It's usually used to factor if a docker registry secret and ImagePullSecret
// should be installed when creating namespaces and application templates
func (td *OsmTestData) AreRegistryCredsPresent() bool {
return len(td.CtrRegistryUser) > 0 && len(td.CtrRegistryPassword) > 0
}
// InitTestData Initializes the test structures
// Called by Ginkgo BeforeEach
func (td *OsmTestData) InitTestData(t GinkgoTInterface) error {
td.T = t
r, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
if err != nil {
return err
}
td.TestID = r.Uint64()
td.T.Log(color.HiGreenString("ID for test: %d", td.TestID))
err = verifyValidInstallType(td.InstType)
if err != nil {
return err
}
if (td.InstType == KindCluster) && td.ClusterProvider == nil {
td.ClusterProvider = cluster.NewProvider()
td.T.Logf("Creating local kind cluster")
if err := td.ClusterProvider.Create(td.ClusterName); err != nil {
return errors.Wrap(err, "failed to create kind cluster")
}
}
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
clientcmd.NewDefaultClientConfigLoadingRules(),
&clientcmd.ConfigOverrides{},
)
kubeConfig, err := clientConfig.ClientConfig()
if err != nil {
return errors.Wrap(err, "failed to get Kubernetes config")
}
clientset, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
return errors.Wrap(err, "failed to create Kubernetes client")
}
td.RestConfig = kubeConfig
td.Client = clientset
td.Env = cli.New()
if err := td.InitSMIClients(); err != nil {
return errors.Wrap(err, "failed to initialize SMI clients")
}
// After client creation, wait for the kind cluster in case it's not fully up yet.
// Ballparking pod number. Kind has a large number of containers to run by default.
if (td.InstType == KindCluster) && td.ClusterProvider != nil {
if err := td.WaitForPodsRunningReady("kube-system", 120*time.Second, 5); err != nil {
return errors.Wrap(err, "failed to wait for kube-system pods")
}
}
return nil
}
// InstallOSMOpts describes install options for OSM
type InstallOSMOpts struct {
ControlPlaneNS string
CertManager string
ContainerRegistryLoc string
ContainerRegistrySecret string
OsmImagetag string
DeployGrafana bool
DeployPrometheus bool
DeployJaeger bool
DeployFluentbit bool
VaultHost string
VaultProtocol string
VaultToken string
VaultRole string
CertmanagerIssuerGroup string
CertmanagerIssuerKind string
CertmanagerIssuerName string
EgressEnabled bool
EnablePermissiveMode bool
EnvoyLogLevel string
EnableDebugServer bool
SetOverrides []string
}
// GetOSMInstallOpts initializes install options for OSM
func (td *OsmTestData) GetOSMInstallOpts() InstallOSMOpts {
return InstallOSMOpts{
ControlPlaneNS: td.OsmNamespace,
CertManager: defaultCertManager,
ContainerRegistryLoc: td.CtrRegistryServer,
ContainerRegistrySecret: td.CtrRegistryPassword,
OsmImagetag: td.OsmImageTag,
DeployGrafana: defaultDeployGrafana,
DeployPrometheus: defaultDeployPrometheus,
DeployJaeger: defaultDeployJaeger,
DeployFluentbit: defaultDeployFluentbit,
VaultHost: "vault." + td.OsmNamespace + ".svc.cluster.local",
VaultProtocol: "http",
VaultRole: "openservicemesh",
VaultToken: "token",
CertmanagerIssuerGroup: "cert-manager.io",
CertmanagerIssuerKind: "Issuer",
CertmanagerIssuerName: "osm-ca",
EnvoyLogLevel: defaultEnvoyLogLevel,
EnableDebugServer: defaultEnableDebugServer,
SetOverrides: []string{},
}
}
// HelmInstallOSM installs an osm control plane using the osm chart which lives in charts/osm
func (td *OsmTestData) HelmInstallOSM(release, namespace string) error {
if td.InstType == KindCluster {
if err := td.loadOSMImagesIntoKind(); err != nil {
return err
}
}
values := fmt.Sprintf("OpenServiceMesh.image.registry=%s,OpenServiceMesh.image.tag=%s,OpenServiceMesh.meshName=%s", td.CtrRegistryServer, td.OsmImageTag, release)
args := []string{"install", release, "../../charts/osm", "--set", values, "--namespace", namespace, "--create-namespace", "--wait"}
stdout, stderr, err := td.RunLocal("helm", args)
if err != nil {
td.T.Logf("stdout:\n%s", stdout)
return errors.Errorf("failed to run helm install with osm chart: %s", stderr)
}
return nil
}
// DeleteHelmRelease uninstalls a particular helm release
func (td *OsmTestData) DeleteHelmRelease(name, namespace string) error {
args := []string{"uninstall", name, "--namespace", namespace}
_, _, err := td.RunLocal("helm", args)
if err != nil {
td.T.Fatal(err)
}
return nil
}
// LoadImagesToKind loads the list of images to the node for Kind clusters
func (td *OsmTestData) LoadImagesToKind(imageNames []string) error {
if td.InstType != KindCluster {
td.T.Log("Not a Kind cluster, nothing to load")
return nil
}
td.T.Log("Getting image data")
docker, err := client.NewClientWithOpts(client.WithAPIVersionNegotiation())
if err != nil {
return errors.Wrap(err, "failed to create docker client")
}
var imageIDs []string
for _, name := range imageNames {
imageName := fmt.Sprintf("%s/%s:%s", td.CtrRegistryServer, name, td.OsmImageTag)
imageIDs = append(imageIDs, imageName)
}
imageData, err := docker.ImageSave(context.TODO(), imageIDs)
if err != nil {
return errors.Wrap(err, "failed to get image data")
}
defer imageData.Close() //nolint: errcheck,gosec
nodes, err := td.ClusterProvider.ListNodes(td.ClusterName)
if err != nil {
return errors.Wrap(err, "failed to list kind nodes")
}
for _, n := range nodes {
td.T.Log("Loading images onto node", n)
if err := nodeutils.LoadImageArchive(n, imageData); err != nil {
return errors.Wrap(err, "failed to load images")
}
}
return nil
}
// InstallOSM installs OSM. The behavior of this function is dependent on
// installType and instOpts
func (td *OsmTestData) InstallOSM(instOpts InstallOSMOpts) error {
if td.InstType == NoInstall {
if instOpts.CertManager != defaultCertManager ||
instOpts.DeployPrometheus != defaultDeployPrometheus ||
instOpts.DeployGrafana != defaultDeployGrafana ||
instOpts.DeployJaeger != defaultDeployJaeger ||
instOpts.DeployFluentbit != defaultDeployFluentbit {
Skip("Skipping test: NoInstall marked on a test that requires modified install")
}
// TODO: Check there is a valid OSM instance running already in OsmNamespace
// This resets supported dynamic configs expected by the caller
err := td.UpdateOSMConfig("egress",
fmt.Sprintf("%t", instOpts.EgressEnabled))
if err != nil {
return err
}
err = td.UpdateOSMConfig("permissive_traffic_policy_mode",
fmt.Sprintf("%t", instOpts.EnablePermissiveMode))
if err != nil {
return err
}
err = td.UpdateOSMConfig("enable_debug_server",
fmt.Sprintf("%t", instOpts.EnableDebugServer))
if err != nil {
return err
}
return nil
}
if td.InstType == KindCluster {
if err := td.loadOSMImagesIntoKind(); err != nil {
return errors.Wrap(err, "failed to load OSM images to nodes for Kind cluster")
}
}
if err := td.CreateNs(instOpts.ControlPlaneNS, nil); err != nil {
return errors.Wrap(err, "failed to create namespace "+instOpts.ControlPlaneNS)
}
var args []string
args = append(args, "install",
"--container-registry="+instOpts.ContainerRegistryLoc,
"--osm-image-tag="+instOpts.OsmImagetag,
"--osm-namespace="+instOpts.ControlPlaneNS,
"--certificate-manager="+instOpts.CertManager,
"--enable-egress="+strconv.FormatBool(instOpts.EgressEnabled),
"--enable-permissive-traffic-policy="+strconv.FormatBool(instOpts.EnablePermissiveMode),
"--enable-debug-server="+strconv.FormatBool(instOpts.EnableDebugServer),
"--envoy-log-level="+instOpts.EnvoyLogLevel,
)
switch instOpts.CertManager {
case "vault":
if err := td.installVault(instOpts); err != nil {
return err
}
args = append(args,
"--vault-host="+instOpts.VaultHost,
"--vault-token="+instOpts.VaultToken,
"--vault-protocol="+instOpts.VaultProtocol,
"--vault-role="+instOpts.VaultRole,
)
case "cert-manager":
if err := td.installCertManager(instOpts); err != nil {
return err
}
args = append(args,
"--cert-manager-issuer-name="+instOpts.CertmanagerIssuerName,
"--cert-manager-issuer-kind="+instOpts.CertmanagerIssuerKind,
"--cert-manager-issuer-group="+instOpts.CertmanagerIssuerGroup,
)
}
if !(td.InstType == KindCluster) {
// Making sure the image is always pulled in registry-based testing
args = append(args, "--osm-image-pull-policy=Always")
}
if len(instOpts.ContainerRegistrySecret) != 0 {
args = append(args, "--container-registry-secret="+registrySecretName)
}
args = append(args, fmt.Sprintf("--deploy-prometheus=%v", instOpts.DeployPrometheus))
args = append(args, fmt.Sprintf("--deploy-grafana=%v", instOpts.DeployGrafana))
args = append(args, fmt.Sprintf("--deploy-jaeger=%v", instOpts.DeployJaeger))
args = append(args, fmt.Sprintf("--enable-fluentbit=%v", instOpts.DeployFluentbit))
args = append(args, fmt.Sprintf("--timeout=%v", 90*time.Second))
if len(instOpts.SetOverrides) > 0 {
separator := "="
finalLine := "--set"
for _, override := range instOpts.SetOverrides {
finalLine = finalLine + separator + override
separator = ","
}
args = append(args, finalLine)
}
td.T.Log("Installing OSM")
stdout, stderr, err := td.RunLocal(filepath.FromSlash("../../bin/osm"), args)
if err != nil {
td.T.Logf("error running osm install")
td.T.Logf("stdout:\n%s", stdout)
td.T.Logf("stderr:\n%s", stderr)
return errors.Wrap(err, "failed to run osm install")
}
return nil
}
// RestartOSMController restarts the osm-controller pod in the installed controller's namespace
func (td *OsmTestData) RestartOSMController(instOpts InstallOSMOpts) error {
labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"app": constants.OSMControllerName}}
listOptions := metav1.ListOptions{
LabelSelector: labels.Set(labelSelector.MatchLabels).String(),
}
controllerPods, err := td.Client.CoreV1().Pods(instOpts.ControlPlaneNS).List(context.TODO(), listOptions)
if err != nil {
return errors.Wrap(err, "error fetching controller pod")
}
if len(controllerPods.Items) != 1 {
return errors.Errorf("expected 1 osm-controller pod, got %d", len(controllerPods.Items))
}
pod := controllerPods.Items[0]
// Delete the pod and let k8s spin it up again
err = td.Client.CoreV1().Pods(instOpts.ControlPlaneNS).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
if err != nil {
return errors.Wrap(err, "erorr deleting osm-controller pod")
}
return nil
}
// GetConfigMap is a wrapper to get a config map by name in a particular namespace
func (td *OsmTestData) GetConfigMap(name, namespace string) (*corev1.ConfigMap, error) {
configmap, err := td.Client.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return configmap, nil
}
func (td *OsmTestData) loadOSMImagesIntoKind() error {
imageNames := []string{
"osm-controller",
"init",
}
return td.LoadImagesToKind(imageNames)
}
func (td *OsmTestData) installVault(instOpts InstallOSMOpts) error {
td.T.Log("Installing Vault")
replicas := int32(1)
terminationGracePeriodSeconds := int64(10)
vaultDep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "vault",
Labels: map[string]string{
"app": "vault",
},
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "vault",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "vault",
},
},
Spec: corev1.PodSpec{
TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
Containers: []corev1.Container{
{
Name: "vault",
Image: "vault:1.4.0",
ImagePullPolicy: corev1.PullAlways,
Command: []string{"/bin/sh", "-c"},
Args: []string{
fmt.Sprintf(`
# The TTL for the expiration of CA certificate must be beyond that of the longest
# TTL for a certificate issued by OSM. The longest TTL for a certificate issued
# within OSM is 87600h.
# Start the Vault Server
vault server -dev -dev-listen-address=0.0.0.0:8200 -dev-root-token-id=%s & sleep 1;
# Make the token available to the following commands
echo %s>~/.vault-token;
# Enable PKI secrets engine
vault secrets enable pki;
# Set the max allowed lease for a certificate to a decade
vault secrets tune -max-lease-ttl=87700h pki;
# Set the URLs (See: https://www.vaultproject.io/docs/secrets/pki#set-url-configuration)
vault write pki/config/urls issuing_certificates='http://127.0.0.1:8200/v1/pki/ca' crl_distribution_points='http://127.0.0.1:8200/v1/pki/crl';
# Configure a role for OSM (See: https://www.vaultproject.io/docs/secrets/pki#configure-a-role)
vault write pki/roles/%s allow_any_name=true allow_subdomains=true max_ttl=87700h;
# Create the root certificate (See: https://www.vaultproject.io/docs/secrets/pki#setup)
vault write pki/root/generate/internal common_name='osm.root' ttl='87700h';
tail /dev/random;
`, instOpts.VaultToken, instOpts.VaultToken, instOpts.VaultRole),
},
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"IPC_LOCK",
},
},
},
Ports: []corev1.ContainerPort{
{
ContainerPort: 8200,
Name: "vault-port",
Protocol: corev1.ProtocolTCP,
},
{
ContainerPort: 8201,
Name: "cluster-port",
Protocol: corev1.ProtocolTCP,
},
},
Env: []corev1.EnvVar{
{
Name: "VAULT_ADDR",
Value: "http://localhost:8200",
},
{
Name: "POD_IP_ADDR",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
{
Name: "VAULT_LOCAL_CONFIG",
Value: "api_addr = \"http://127.0.0.1:8200\"\ncluster_addr = \"http://${POD_IP_ADDR}:8201\"",
},
{
Name: "VAULT_DEV_ROOT_TOKEN_ID",
Value: "root", // THIS IS NOT A PRODUCTION DEPLOYMENT OF VAULT!
},
},
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/v1/sys/health",
Port: intstr.FromInt(8200),
Scheme: corev1.URISchemeHTTP,
},
},
InitialDelaySeconds: 5,
PeriodSeconds: 5,
},
},
},
},
},
},
}
_, err := td.Client.AppsV1().Deployments(instOpts.ControlPlaneNS).Create(context.TODO(), vaultDep, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create vault deployment")
}
vaultSvc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "vault",
Labels: map[string]string{
"app": "vault",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
Selector: map[string]string{
"app": "vault",
},
Ports: []corev1.ServicePort{
{
Name: "vault-port",
Port: 8200,
TargetPort: intstr.FromInt(8200),
Protocol: corev1.ProtocolTCP,
},
},
},
}
_, err = td.Client.CoreV1().Services(instOpts.ControlPlaneNS).Create(context.TODO(), vaultSvc, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create vault service")
}
return nil
}
func (td *OsmTestData) installCertManager(instOpts InstallOSMOpts) error {
By("Installing cert-manager")
helm := &action.Configuration{}
if err := helm.Init(td.Env.RESTClientGetter(), td.OsmNamespace, "secret", td.T.Logf); err != nil {
return errors.Wrap(err, "failed to initialize helm config")
}
install := action.NewInstall(helm)
install.RepoURL = "https://charts.jetstack.io"
install.Namespace = td.OsmNamespace
install.ReleaseName = "certmanager"
install.Version = "v0.16.1"
chartPath, err := install.LocateChart("cert-manager", helmcli.New())
if err != nil {
return errors.Wrap(err, "failed to get cert-manager-chart")
}
chart, err := loader.Load(chartPath)
if err != nil {
return errors.Wrap(err, "failed to load cert-manager chart")
}
_, err = install.Run(chart, map[string]interface{}{
"installCRDs": true,
})
if err != nil {
return errors.Wrap(err, "failed to install cert-manager chart")
}
selfsigned := &v1alpha2.Issuer{
ObjectMeta: metav1.ObjectMeta{
Name: "selfsigned",
},
Spec: v1alpha2.IssuerSpec{
IssuerConfig: v1alpha2.IssuerConfig{
SelfSigned: &v1alpha2.SelfSignedIssuer{},
},
},
}
cert := &v1alpha2.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: "osm-ca",
},
Spec: v1alpha2.CertificateSpec{
IsCA: true,
Duration: &metav1.Duration{Duration: 90 * 24 * time.Hour},
SecretName: "osm-ca-bundle",
CommonName: "osm-system",
IssuerRef: cmmeta.ObjectReference{
Name: selfsigned.Name,
Kind: "Issuer",
Group: "cert-manager.io",
},
},
}
ca := &v1alpha2.Issuer{
ObjectMeta: metav1.ObjectMeta{
Name: "osm-ca",
},
Spec: v1alpha2.IssuerSpec{
IssuerConfig: v1alpha2.IssuerConfig{
CA: &v1alpha2.CAIssuer{
SecretName: "osm-ca-bundle",
},
},
},
}
if err := td.WaitForPodsRunningReady(install.Namespace, 60*time.Second, 3); err != nil {
return errors.Wrap(err, "failed to wait for cert-manager pods ready")
}
cmClient, err := certman.NewForConfig(td.RestConfig)
if err != nil {
return errors.Wrap(err, "failed to create cert-manager config")
}
_, err = cmClient.CertmanagerV1alpha2().Certificates(td.OsmNamespace).Create(context.TODO(), cert, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create Certificate "+cert.Name)
}
_, err = cmClient.CertmanagerV1alpha2().Issuers(td.OsmNamespace).Create(context.TODO(), selfsigned, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create Issuer "+selfsigned.Name)
}
_, err = cmClient.CertmanagerV1alpha2().Issuers(td.OsmNamespace).Create(context.TODO(), ca, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create Issuer "+ca.Name)
}
return nil
}
// AddNsToMesh adds monitored namespaces to the OSM mesh
func (td *OsmTestData) AddNsToMesh(sidecarInject bool, ns ...string) error {
td.T.Logf("Adding Namespaces [+%s] to the mesh", ns)
for _, namespace := range ns {
args := []string{"namespace", "add", namespace}
if !sidecarInject {
args = append(args, "--disable-sidecar-injection")
}
stdout, stderr, err := td.RunLocal(filepath.FromSlash("../../bin/osm"), args)
if err != nil {
td.T.Logf("error running osm namespace add")
td.T.Logf("stdout:\n%s", stdout)
td.T.Logf("stderr:\n%s", stderr)
return errors.Wrap(err, "failed to run osm namespace add")
}
if Td.EnableNsMetricTag {
args = []string{"metrics", "enable", "--namespace", namespace}
stdout, stderr, err = td.RunLocal(filepath.FromSlash("../../bin/osm"), args)
if err != nil {
td.T.Logf("error running osm namespace add")
td.T.Logf("stdout:\n%s", stdout)
td.T.Logf("stderr:\n%s", stderr)
return errors.Wrap(err, "failed to run osm namespace add")
}
}
}
return nil
}
// UpdateOSMConfig updates OSM configmap
func (td *OsmTestData) UpdateOSMConfig(key, value string) error {
patch := []byte(fmt.Sprintf(`{"data": {%q: %q}}`, key, value))
_, err := td.Client.CoreV1().ConfigMaps(td.OsmNamespace).Patch(context.TODO(), "osm-config", types.StrategicMergePatchType, patch, metav1.PatchOptions{})
return err
}
// CreateMultipleNs is a convenience wrapper around CreateNs for creating multiple namespaces
func (td *OsmTestData) CreateMultipleNs(nsName ...string) error {
for _, ns := range nsName {
err := td.CreateNs(ns, nil)
if err != nil {
return err
}
}
return nil
}
// CreateNs creates a Namespace. Will automatically add Docker registry creds if provided
func (td *OsmTestData) CreateNs(nsName string, labels map[string]string) error {
if labels == nil {
labels = make(map[string]string)
}
for k, v := range td.GetTestNamespaceSelectorMap() {
labels[k] = v
}
namespaceObj := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: nsName,
Namespace: "",
Labels: labels,
},
Status: corev1.NamespaceStatus{},
}
td.T.Logf("Creating namespace %v", nsName)
_, err := td.Client.CoreV1().Namespaces().Create(context.Background(), namespaceObj, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to create namespace "+nsName)
}
// Check if we are using any specific creds
if td.AreRegistryCredsPresent() {
td.CreateDockerRegistrySecret(nsName)
}
return nil
}
// DeleteNs deletes a test NS
func (td *OsmTestData) DeleteNs(nsName string) error {
// Delete Helm releases created in the namespace
helm := &action.Configuration{}
if err := helm.Init(td.Env.RESTClientGetter(), nsName, "secret", td.T.Logf); err != nil {
td.T.Logf("WARNING: failed to initialize helm config, skipping helm cleanup: %v", err)
} else {
list := action.NewList(helm)
list.All = true
if releases, err := list.Run(); err != nil {
td.T.Logf("WARNING: failed to list helm releases in namespace %s, skipping release cleanup: %v", nsName, err)
} else {
del := action.NewUninstall(helm)
for _, release := range releases {
if _, err := del.Run(release.Name); err != nil {
td.T.Logf("WARNING: failed to delete helm release %s in namespace %s: %v", release.Name, nsName, err)
}
}
}
}
var backgroundDelete metav1.DeletionPropagation = metav1.DeletePropagationBackground
td.T.Logf("Deleting namespace %v", nsName)
err := td.Client.CoreV1().Namespaces().Delete(context.Background(), nsName, metav1.DeleteOptions{PropagationPolicy: &backgroundDelete})
if err != nil {
return errors.Wrap(err, "failed to delete namespace "+nsName)
}
return nil
}
// WaitForNamespacesDeleted waits for the namespaces to be deleted.
// Reference impl taken from https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/util.go#L258
func (td *OsmTestData) WaitForNamespacesDeleted(namespaces []string, timeout time.Duration) error {
By(fmt.Sprintf("Waiting for namespaces %v to vanish", namespaces))
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
// Now poll until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := td.Client.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
// RunLocal executes a command locally
func (td *OsmTestData) RunLocal(path string, args []string) (*bytes.Buffer, *bytes.Buffer, error) {
cmd := exec.Command(path, args...) // #nosec G204
stdout := bytes.NewBuffer(nil)
stderr := bytes.NewBuffer(nil)
cmd.Stdout = stdout
cmd.Stderr = stderr
td.T.Logf("Running locally '%s %s'", path, strings.Join(args, " "))
err := cmd.Run()
return stdout, stderr, err
}
// RunRemote runs command in remote container
func (td *OsmTestData) RunRemote(
ns string, podName string, containerName string,
command []string) (string, string, error) {
var stdin, stdout, stderr bytes.Buffer
req := td.Client.CoreV1().RESTClient().Post().Resource("pods").Name(podName).
Namespace(ns).SubResource("exec")
option := &corev1.PodExecOptions{
Command: command,
Container: containerName,
Stdin: true,
Stdout: true,
Stderr: true,
TTY: false,
}
scheme := runtime.NewScheme()
err := corev1.AddToScheme(scheme)
if err != nil {
return "", "", err
}
req.VersionedParams(
option,
runtime.NewParameterCodec(scheme),
)
exec, err := remotecommand.NewSPDYExecutor(td.RestConfig, "POST", req.URL())
if err != nil {
return "", "", err
}
err = exec.Stream(remotecommand.StreamOptions{
Stdin: &stdin,
Stdout: &stdout,
Stderr: &stderr,
})
if err != nil {
return "", "", err
}
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), nil
}
// WaitForPodsRunningReady waits for a <n> number of pods on an NS to be running and ready
func (td *OsmTestData) WaitForPodsRunningReady(ns string, timeout time.Duration, nExpectedRunningPods int) error {
td.T.Logf("Wait up to %v for %d pods ready in ns [%s]...", timeout, nExpectedRunningPods, ns)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
pods, err := td.Client.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
FieldSelector: "status.phase=Running",
})
if err != nil {
return errors.Wrap(err, "failed to list pods")
}
if len(pods.Items) < nExpectedRunningPods {
time.Sleep(time.Second)
continue
}
nReadyPods := 0
for _, pod := range pods.Items {
for _, cond := range pod.Status.Conditions {
if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
nReadyPods++
if nReadyPods == nExpectedRunningPods {
td.T.Logf("Finished waiting for NS [%s].", ns)
return nil
}
}
}
}
time.Sleep(time.Second)
}
return fmt.Errorf("Not all pods were Running & Ready in NS %s after %v", ns, timeout)
}
// SuccessFunction is a simple definition for a success function;
// it returns true on success, false otherwise
type SuccessFunction func() bool
// WaitForRepeatedSuccess repeatedly runs an operation, expecting it to succeed a set number of consecutive times
// within a set amount of time.
func (td *OsmTestData) WaitForRepeatedSuccess(f SuccessFunction, minItForSuccess int, maxWaitTime time.Duration) bool {
iterations := 0
startTime := time.Now()
By(fmt.Sprintf("[WaitForRepeatedSuccess] waiting %v for %d iterations to succeed", maxWaitTime, minItForSuccess))
for time.Since(startTime) < maxWaitTime {
if f() {
iterations++
if iterations >= minItForSuccess {
return true
}
} else {
iterations = 0
}
time.Sleep(time.Second)
}
return false
}
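// Example (illustrative only; the success function shown is hypothetical):
//
//	ok := Td.WaitForRepeatedSuccess(func() bool {
//		return serviceIsReachable() // any check returning true on success
//	}, 5, 60*time.Second)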
// CleanupType identifies what triggered the cleanup
type CleanupType string
const (
// Test is to mark after-test cleanup
Test CleanupType = "test"
// Suite is to mark after-suite cleanup
Suite CleanupType = "suite"
)
// Cleanup is used to clean up resources once the test is done
func (td *OsmTestData) Cleanup(ct CleanupType) {
if td.Client == nil {
// Avoid any cleanup (crash) if no test is run;
// init doesn't happen and clientsets are nil
return
}
// The condition enters to cleanup K8s resources if
// - cleanup is enabled and it's not a kind cluster
// - cleanup is enabled and it is a kind cluster, but the kind cluster will NOT be
// destroyed after this test.
// The latter is a condition to speed up and not wait for k8s resources to vanish
// if the current kind cluster has to be destroyed anyway.
if td.CleanupTest &&
(!(td.InstType == KindCluster) ||
(td.InstType == KindCluster &&
(ct == Test && !td.CleanupKindClusterBetweenTests) ||
(ct == Suite && !td.CleanupKindCluster))) {
// Use selector to refer to all namespaces used in this test
nsSelector := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(td.GetTestNamespaceSelectorMap()).String(),
}
testNs, err := td.Client.CoreV1().Namespaces().List(context.Background(), nsSelector)
if err != nil {
td.T.Fatalf("Failed to get list of test NS: %v", err)
}
for _, ns := range testNs.Items {
err := td.DeleteNs(ns.Name)
if err != nil {
td.T.Logf("Err deleting ns %s: %v", ns.Name, err)
continue
}
}
By(fmt.Sprintf("[Cleanup] waiting for %s:%d test NS cleanup", osmTest, GinkgoRandomSeed()))
if td.WaitForCleanup {
err := wait.Poll(2*time.Second, 240*time.Second,
func() (bool, error) {
nsList, err := td.Client.CoreV1().Namespaces().List(context.TODO(), nsSelector)
if err != nil {
td.T.Logf("Err waiting for ns list to disappear: %v", err)
return false, err
}
return len(nsList.Items) == 0, nil
},
)
if err != nil {
td.T.Logf("Poll err: %v", err)
}
}
}
// Kind cluster deletion, if needed
if (td.InstType == KindCluster) && td.ClusterProvider != nil {
if ct == Test && td.CleanupKindClusterBetweenTests || ct == Suite && td.CleanupKindCluster {
td.T.Logf("Deleting kind cluster: %s", td.ClusterName)
if err := td.ClusterProvider.Delete(td.ClusterName, clientcmd.RecommendedHomeFile); err != nil {
td.T.Logf("error deleting cluster: %v", err)
}
td.ClusterProvider = nil
}
}
}
// DockerConfig and other configs are docker-specific container registry secret structures.
// Most of it is taken or referenced from kubectl source itself
type DockerConfig map[string]DockerConfigEntry
// DockerConfigJSON is a struct for docker-specific config
type DockerConfigJSON struct {
Auths DockerConfig `json:"auths"`
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
}
// DockerConfigEntry is a struct for docker-specific container registry secret structures
type DockerConfigEntry struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Email string `json:"email,omitempty"`
Auth string `json:"auth,omitempty"`
}
// CreateDockerRegistrySecret creates a secret named `registrySecretName` in namespace <ns>,
// based on ctrRegistry variables
func (td *OsmTestData) CreateDockerRegistrySecret(ns string) {
secret := &corev1.Secret{}
secret.Name = registrySecretName
secret.Type = corev1.SecretTypeDockerConfigJson
secret.Data = map[string][]byte{}
dockercfgAuth := DockerConfigEntry{
Username: td.CtrRegistryUser,
Password: td.CtrRegistryPassword,
Email: "[email protected]",
Auth: base64.StdEncoding.EncodeToString([]byte(td.CtrRegistryUser + ":" + td.CtrRegistryPassword)),
}
dockerCfgJSON := DockerConfigJSON{
Auths: map[string]DockerConfigEntry{td.CtrRegistryServer: dockercfgAuth},
}
json, _ := json.Marshal(dockerCfgJSON)
secret.Data[corev1.DockerConfigJsonKey] = json
td.T.Logf("Pushing Registry secret '%s' for namespace %s... ", registrySecretName, ns)
_, err := td.Client.CoreV1().Secrets(ns).Create(context.Background(), secret, metav1.CreateOptions{})
if err != nil {
td.T.Fatalf("Could not add registry secret")
}
}
|
[
"\"CTR_REGISTRY\"",
"\"CTR_REGISTRY_USER\"",
"\"CTR_REGISTRY_PASSWORD\""
] |
[] |
[
"CTR_REGISTRY_USER",
"CTR_REGISTRY",
"CTR_REGISTRY_PASSWORD"
] |
[]
|
["CTR_REGISTRY_USER", "CTR_REGISTRY", "CTR_REGISTRY_PASSWORD"]
|
go
| 3 | 0 | |
yt_dlp/YoutubeDL.py
|
#!/usr/bin/env python3
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random
import unicodedata
from string import ascii_letters
from .compat import (
compat_basestring,
compat_get_terminal_size,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_pycrypto_AES,
compat_shlex_quote,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
windows_enable_vt_mode,
)
from .cookies import load_cookies
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DOT_DESKTOP_LINK_TEMPLATE,
DOT_URL_LINK_TEMPLATE,
DOT_WEBLOC_LINK_TEMPLATE,
DownloadError,
encode_compat_str,
encodeFilename,
EntryNotInPlaylist,
error_to_compat_str,
ExistingVideoReached,
expand_path,
ExtractorError,
float_or_none,
format_bytes,
format_field,
formatSeconds,
GeoRestrictedError,
HEADRequest,
int_or_none,
iri_to_uri,
ISO3166Utils,
LazyList,
locked_file,
make_dir,
make_HTTPS_handler,
MaxDownloadsReached,
network_exceptions,
orderedSet,
OUTTMPL_TYPES,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
Popen,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
RejectedVideoReached,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
STR_FORMAT_RE_TMPL,
STR_FORMAT_TYPES,
str_or_none,
strftime_or_none,
subtitles_filename,
supports_terminal_sequences,
TERMINAL_SEQUENCES,
ThrottledDownload,
to_high_limit_path,
traverse_obj,
try_get,
UnavailableVideoError,
url_basename,
variadic,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import (
gen_extractor_classes,
get_info_extractor,
_LAZY_LOADER,
_PLUGIN_CLASSES as plugin_extractors
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
FFmpegFD,
get_suitable_downloader,
shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
get_postprocessor,
EmbedThumbnailPP,
FFmpegFixupDurationPP,
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegFixupTimestampPP,
FFmpegMergerPP,
FFmpegPostProcessor,
MoveFilesAfterDownloadPP,
_PLUGIN_CLASSES as plugin_postprocessors
)
from .update import detect_variant
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible for downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. Given a video URL, the downloader doesn't know how to
extract all the needed information (that is the task of the
InfoExtractors), so it has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
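As a minimal, illustrative sketch (the URL and option values are
placeholders, not part of this documentation):

    ydl = YoutubeDL({'format': 'best', 'outtmpl': '%(title)s.%(ext)s'})
    ydl.download(['<video URL>'])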
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceprint: A list of templates to force print
forceurl: Force printing final URL. (Deprecated)
forcetitle: Force printing title. (Deprecated)
forceid: Force printing ID. (Deprecated)
forcethumbnail: Force printing thumbnail URL. (Deprecated)
forcedescription: Force printing description. (Deprecated)
forcefilename: Force printing final filename. (Deprecated)
forceduration: Force printing duration. (Deprecated)
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
force_write_download_archive: Force writing download archive regardless
of 'skip_download' or 'simulate'.
simulate: Do not download the video files. If unset (or None),
simulate only if listsubtitles, listformats or list_thumbnails is used
format: Video format code. see "FORMAT SELECTION" for more details.
allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
ignore_no_formats_error: Ignore "No video formats" error. Useful for
extracting metadata even if the video is not actually
available for download (experimental)
format_sort: How to sort the video formats. see "Sorting Formats"
for more details.
format_sort_force: Force the given format_sort. see "Sorting Formats"
for more details.
allow_multiple_video_streams: Allow multiple video streams to be merged
into a single file
allow_multiple_audio_streams: Allow multiple audio streams to be merged
into a single file
check_formats: Whether to test if the formats are downloadable.
Can be True (check all), False (check none)
or None (check only if requested by extractor)
paths: Dictionary of output paths. The allowed keys are 'home'
'temp' and the keys of OUTTMPL_TYPES (in utils.py)
outtmpl: Dictionary of templates for output names. Allowed keys
are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
For compatibility with youtube-dl, a single string can also be used
outtmpl_na_placeholder: Placeholder for unavailable meta fields.
restrictfilenames: Do not allow "&" and spaces in file names
trim_file_name: Limit length of filename (extension excluded)
windowsfilenames: Force the filenames to be windows compatible
ignoreerrors: Do not stop on download/postprocessing errors.
Can be 'only_download' to ignore only download errors.
Default is 'only_download' for CLI, but False for API
skip_playlist_after_errors: Number of allowed failures until the rest of
the playlist is skipped
force_generic_extractor: Force downloader to use the generic extractor
overwrites: Overwrite all video and metadata files if True,
overwrite only non-video files if None
and don't overwrite any file if False
For compatibility with youtube-dl,
"nooverwrites" may also be used instead
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
clean_infojson: Remove private fields from the infojson
getcomments: Extract video comments. This will not be written to disk
unless writeinfojson is also given
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
allow_playlist_files: Whether to write playlists' description, infojson etc
also to disk when using the 'write*' options
write_all_thumbnails: Write all thumbnail formats to files
writelink: Write an internet shortcut file, depending on the
current platform (.url/.webloc/.desktop)
writeurllink: Write a Windows internet shortcut file (.url)
writewebloclink: Write a macOS internet shortcut file (.webloc)
writedesktoplink: Write a Linux internet shortcut file (.desktop)
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Deprecated - Use subtitleslangs = ['all']
Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download (can be regex).
The list may contain "all" to refer to all the available
subtitles. The language can be prefixed with a "-" to
exclude it from the requested languages. Eg: ['all', '-live_chat']
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
break_on_existing: Stop the download process after attempting to download a
file that is in the archive.
break_on_reject: Stop the download process when encountering a video that
has been filtered out.
cookiefile: File name where cookies should be read from and dumped to
cookiesfrombrowser: A tuple containing the name of the browser and the profile
name/path from where cookies are loaded.
Eg: ('chrome', ) or ('vivaldi', 'default')
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
yt_dlp/postprocessor/__init__.py for a list.
* when: When to run the postprocessor. Can be one of
pre_process|before_dl|post_process|after_move.
Assumed to be 'post_process' if not given
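Any other keys are passed to the postprocessor's __init__.
Eg (illustrative): [{'key': 'FFmpegExtractAudio',
                     'preferredcodec': 'mp3'}]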
post_hooks: Deprecated - Register a custom postprocessor instead
A list of functions that get called as the final step
for each video file, after all postprocessors have been
called. The filename will be passed as the only argument.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
* info_dict: The extracted info_dict
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
postprocessor_hooks: A list of functions that get called on postprocessing
progress, with a dictionary with the entries
* status: One of "started", "processing", or "finished".
Check this first and ignore unknown values.
* postprocessor: Name of the postprocessor
* info_dict: The extracted info_dict
Progress hooks are guaranteed to be called at least twice
(with status "started" and "finished") if the processing is successful.
merge_output_format: Extension to use when merging formats.
final_ext: Expected final extension; used to detect when the file was
already downloaded and converted. "merge_output_format" is
replaced by this extension when given
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
yt-dlp servers for debugging. (BROKEN)
sleep_interval_requests: Number of seconds to sleep between requests
during extraction
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
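A minimal sketch (the 60-second cutoff is only an
illustration):
    def skip_short_videos(info_dict):
        if (info_dict.get('duration') or 0) < 60:
            return 'Shorter than a minute, skipping'
        return None  # None means: download it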
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
The following options determine which downloader is picked:
external_downloader: A dictionary of protocol keys and the executable of the
external downloader to use for it. The allowed protocols
are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
Set the value to 'native' to use the native downloader
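Eg (illustrative; the chosen executables must be
installed separately): {'default': 'aria2c', 'm3u8': 'native'}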
hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
or {'m3u8': 'ffmpeg'} instead.
Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
compat_opts: Compatibility options. See "Differences in default behavior".
The following options do not work when used through the API:
filename, abort-on-error, multistreams, no-live-chat, format-sort,
no-clean-infojson, no-playlist-metafiles, no-keep-subs.
Refer __init__.py for their implementation
progress_template: Dictionary of templates for progress outputs.
Allowed keys are 'download', 'postprocess',
'download-title' (console title) and 'postprocess-title'.
The template is mapped on a dictionary with keys 'progress' and 'info'
The following parameters are not used by YoutubeDL itself; they are used by
the downloader (see yt_dlp/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
max_filesize, test, noresizebuffer, retries, fragment_retries, continuedl,
noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
external_downloader_args.
The following options are used by the post processors:
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg. (avconv support is deprecated)
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
to the binary or its containing directory.
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
and a list of additional command-line arguments for the
postprocessor/executable. The dict can also have "PP+EXE" keys
which are used when the given exe is used by the given PP.
Use 'default' as the name for arguments to passed to all PP
For compatibility with youtube-dl, a single list of args
can also be used
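Eg (illustrative): {'ffmpeg': ['-nostats', '-hide_banner']}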
The following options are used by the extractors:
extractor_retries: Number of times to retry for known errors
dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
hls_split_discontinuity: Split HLS playlists to different formats at
discontinuities such as ad breaks (default: False)
extractor_args: A dictionary of arguments to be passed to the extractors.
See "EXTRACTOR ARGUMENTS" for details.
Eg: {'youtube': {'skip': ['dash', 'hls']}}
youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH. (only for youtube)
youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
If True (default), HLS manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about HLS. (only for youtube)
"""
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'release_timestamp',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
))
_format_selection_exts = {
'audio': {'m4a', 'mp3', 'ogg', 'aac'},
'video': {'mp4', 'flv', 'webm', '3gp'},
'storyboards': {'mhtml'},
}
params = None
_ies = {}
_pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
_printed_messages = set()
_first_webpage_request = True
_download_retcode = None
_num_downloads = None
_playlist_level = 0
_playlist_urls = set()
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options.
@param auto_init Whether to load the default extractors and print header (if verbose).
Set to 'no_verbose_header' to not print the header
"""
if params is None:
params = {}
self._ies = {}
self._ies_instances = {}
self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
self._printed_messages = set()
self._first_webpage_request = True
self._post_hooks = []
self._progress_hooks = []
self._postprocessor_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = params
self.cache = Cache(self)
windows_enable_vt_mode()
# FIXME: This will break if we ever print color to stdout
self.params['no_color'] = self.params.get('no_color') or not supports_terminal_sequences(self._err_file)
if sys.version_info < (3, 6):
self.report_warning(
'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])
if self.params.get('allow_unplayable_formats'):
self.report_warning(
f'You have asked for {self._color_text("unplayable formats", "blue")} to be listed/downloaded. '
'This is a developer option intended for debugging. \n'
' If you experience any issues while using this option, '
f'{self._color_text("DO NOT", "red")} open a bug report')
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
for msg in self.params.get('warnings', []):
self.report_warning(msg)
if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
# nooverwrites was unnecessarily changed to overwrites
# in 0c3d0f51778b153f65c21906031c2e091fcfb641
# This ensures compatibility with both keys
self.params['overwrites'] = not self.params['nooverwrites']
elif self.params.get('overwrites') is None:
self.params.pop('overwrites', None)
else:
self.params['nooverwrites'] = not self.params['overwrites']
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
except OSError:
self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.platform != 'win32'
and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params.get('restrictfilenames', False)):
# Unicode filesystem API will throw errors (#1474, #13027)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
self.outtmpl_dict = self.parse_outtmpl()
# Creating format selector here allows us to catch syntax errors before the extraction
self.format_selector = (
None if self.params.get('format') is None
else self.build_format_selector(self.params['format']))
self._setup_opener()
if auto_init:
if auto_init != 'no_verbose_header':
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_def = dict(pp_def_raw)
when = pp_def.pop('when', 'post_process')
pp_class = get_postprocessor(pp_def.pop('key'))
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp, when=when)
for ph in self.params.get('post_hooks', []):
self.add_post_hook(ph)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
def preload_download_archive(fn):
"""Preload the archive, if any is specified"""
if fn is None:
return False
self.write_debug('Loading archive file %r\n' % fn)
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
self.archive.add(line.strip())
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
return True
self.archive = set()
preload_download_archive(self.params.get('download_archive'))
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['yt-dlp']
+ [a for i, a in enumerate(argv) if i not in idxs]
+ ['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
ie_key = ie.ie_key()
self._ies[ie_key] = ie
if not isinstance(ie, type):
self._ies_instances[ie_key] = ie
ie.set_downloader(self)
def _get_info_extractor_class(self, ie_key):
ie = self._ies.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)
self.add_info_extractor(ie)
return ie
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key; it will try to get one from
the _ies list, and if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp, when='post_process'):
"""Add a PostProcessor object to the end of the chain."""
self._pps[when].append(pp)
pp.set_downloader(self)
def add_post_hook(self, ph):
"""Add the post hook"""
self._post_hooks.append(ph)
def add_progress_hook(self, ph):
"""Add the download progress hook"""
self._progress_hooks.append(ph)
def add_postprocessor_hook(self, ph):
"""Add the postprocessing progress hook"""
self._postprocessor_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def _write_string(self, message, out=None, only_once=False):
if only_once:
if message in self._printed_messages:
return
self._printed_messages.add(message)
write_string(message, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, quiet=False):
"""Print message to stdout"""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not quiet or self.params.get('verbose'):
self._write_string(
'%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
self._err_file if quiet else self._screen_file)
def to_stderr(self, message, only_once=False):
"""Print message to stderr"""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt':
if ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate'):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate'):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
Depending on whether the downloader has been configured to ignore
download errors or not, this method may throw an exception or
not when errors are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
if tb:
self.to_stderr(tb)
if not self.params.get('ignoreerrors'):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode"""
self.to_stdout(
message, skip_eol, quiet=self.params.get('quiet', False))
def _color_text(self, text, color):
if self.params.get('no_color'):
return text
return f'{TERMINAL_SEQUENCES[color.upper()]}{text}{TERMINAL_SEQUENCES["RESET_STYLE"]}'
def report_warning(self, message, only_once=False):
'''
Print the message to stderr; it will be prefixed with 'WARNING:'.
If stderr is a tty file, the 'WARNING:' will be colored
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
self.to_stderr(f'{self._color_text("WARNING:", "yellow")} {message}', only_once)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefix the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
self.trouble(f'{self._color_text("ERROR:", "red")} {message}', tb)
def write_debug(self, message, only_once=False):
'''Log debug message or Print message to stderr'''
if not self.params.get('verbose', False):
return
message = '[debug] %s' % message
if self.params.get('logger'):
self.params['logger'].debug(message)
else:
self.to_stderr(message, only_once)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_file_delete(self, file_name):
"""Report that existing file will be deleted."""
try:
self.to_screen('Deleting existing file %s' % file_name)
except UnicodeEncodeError:
self.to_screen('Deleting existing file')
def raise_no_formats(self, info, forced=False):
has_drm = info.get('__has_drm')
msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
expected = self.params.get('ignore_no_formats_error')
if forced or not expected:
raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
expected=has_drm or expected)
else:
self.report_warning(msg)
def parse_outtmpl(self):
outtmpl_dict = self.params.get('outtmpl', {})
if not isinstance(outtmpl_dict, dict):
outtmpl_dict = {'default': outtmpl_dict}
# Remove spaces in the default template
if self.params.get('restrictfilenames'):
sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
else:
sanitize = lambda x: x
outtmpl_dict.update({
k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
if outtmpl_dict.get(k) is None})
for key, val in outtmpl_dict.items():
if isinstance(val, bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
return outtmpl_dict
def get_output_path(self, dir_type='', filename=None):
paths = self.params.get('paths', {})
assert isinstance(paths, dict)
path = os.path.join(
expand_path(paths.get('home', '').strip()),
expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
filename or '')
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
path = encodeFilename(path, True).decode(preferredencoding())
return sanitize_path(path, force=self.params.get('windowsfilenames'))
@staticmethod
def _outtmpl_expandpath(outtmpl):
# expand_path translates '%%' into '%' and '$$' into '$'
# correspondingly that is not what we want since we need to keep
# '%%' intact for template dict substitution step. Working around
# with boundary-alike separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
return expand_path(outtmpl).replace(sep, '')
@staticmethod
def escape_outtmpl(outtmpl):
''' Escape any remaining strings like %s, %abc% etc. '''
return re.sub(
STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
outtmpl)
@classmethod
def validate_outtmpl(cls, outtmpl):
''' @return None or Exception object '''
outtmpl = re.sub(
STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBU]'),
lambda mobj: f'{mobj.group(0)[:-1]}s',
cls._outtmpl_expandpath(outtmpl))
try:
cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
return None
except ValueError as err:
return err
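# Rough usage sketch (not a strict API contract): for a well-formed template
# such as '%(title)s.%(ext)s' this returns None; if substitution against the
# dummy dict above raises, the ValueError object itself is returned instead.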
@staticmethod
def _copy_infodict(info_dict):
info_dict = dict(info_dict)
for key in ('__original_infodict', '__postprocessors'):
info_dict.pop(key, None)
return info_dict
def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
""" Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict """
info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
info_dict = self._copy_infodict(info_dict)
info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
formatSeconds(info_dict['duration'], '-' if sanitize else ':')
if info_dict.get('duration', None) is not None
else None)
info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if info_dict.get('resolution') is None:
info_dict['resolution'] = self.format_resolution(info_dict, default=None)
# For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
'playlist_autonumber': len(str(info_dict.get('n_entries') or '')),
'autonumber': self.params.get('autonumber_size') or 5,
}
TMPL_DICT = {}
EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBU]'))
MATH_FUNCTIONS = {
'+': float.__add__,
'-': float.__sub__,
}
# Field is of the form key1.key2...
# where keys (except first) can be string, int or slice
FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
INTERNAL_FORMAT_RE = re.compile(r'''(?x)
(?P<negate>-)?
(?P<fields>{field})
(?P<maths>(?:{math_op}{math_field})*)
(?:>(?P<strf_format>.+?))?
(?P<alternate>(?<!\\),[^|)]+)?
(?:\|(?P<default>.*?))?
$'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
def _traverse_infodict(k):
k = k.split('.')
if k[0] == '':
k.pop(0)
return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
def get_value(mdict):
# Object traversal
value = _traverse_infodict(mdict['fields'])
# Negative
if mdict['negate']:
value = float_or_none(value)
if value is not None:
value *= -1
# Do maths
offset_key = mdict['maths']
if offset_key:
value = float_or_none(value)
operator = None
while offset_key:
item = re.match(
MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
offset_key).group(0)
offset_key = offset_key[len(item):]
if operator is None:
operator = MATH_FUNCTIONS[item]
continue
item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
offset = float_or_none(item)
if offset is None:
offset = float_or_none(_traverse_infodict(item))
try:
value = operator(value, multiplier * offset)
except (TypeError, ZeroDivisionError):
return None
operator = None
# Datetime formatting
if mdict['strf_format']:
value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
return value
na = self.params.get('outtmpl_na_placeholder', 'NA')
def _dumpjson_default(obj):
if isinstance(obj, (set, LazyList)):
return list(obj)
raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')
def create_key(outer_mobj):
if not outer_mobj.group('has_key'):
return outer_mobj.group(0)
key = outer_mobj.group('key')
mobj = re.match(INTERNAL_FORMAT_RE, key)
initial_field = mobj.group('fields').split('.')[-1] if mobj else ''
value, default = None, na
while mobj:
mobj = mobj.groupdict()
default = mobj['default'] if mobj['default'] is not None else default
value = get_value(mobj)
if value is None and mobj['alternate']:
mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
else:
break
fmt = outer_mobj.group('format')
if fmt == 's' and value is not None and key in field_size_compat_map.keys():
fmt = '0{:d}d'.format(field_size_compat_map[key])
value = default if value is None else value
str_fmt = f'{fmt[:-1]}s'
if fmt[-1] == 'l': # list
delim = '\n' if '#' in (outer_mobj.group('conversion') or '') else ', '
value, fmt = delim.join(variadic(value)), str_fmt
elif fmt[-1] == 'j': # json
value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
elif fmt[-1] == 'q': # quoted
value, fmt = compat_shlex_quote(str(value)), str_fmt
elif fmt[-1] == 'B': # bytes
value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
value, fmt = value.decode('utf-8', 'ignore'), 's'
elif fmt[-1] == 'U': # unicode normalized
opts = outer_mobj.group('conversion') or ''
value, fmt = unicodedata.normalize(
# "+" = compatibility equivalence, "#" = NFD
'NF%s%s' % ('K' if '+' in opts else '', 'D' if '#' in opts else 'C'),
value), str_fmt
elif fmt[-1] == 'c':
if value:
value = str(value)[0]
else:
fmt = str_fmt
elif fmt[-1] not in 'rs': # numeric
value = float_or_none(value)
if value is None:
value, fmt = default, 's'
if sanitize:
if fmt[-1] == 'r':
# If value is an object, sanitize might convert it to a string
# So we convert it to repr first
value, fmt = repr(value), str_fmt
if fmt[-1] in 'csr':
value = sanitize(initial_field, value)
key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
TMPL_DICT[key] = value
return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
return self.escape_outtmpl(outtmpl) % info_dict
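# Illustrative call (assuming `ydl` is a YoutubeDL instance and `info` an
# already extracted info dict):
#     ydl.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s', info)
# This performs the same template expansion as prepare_filename, but without
# the path joining and forced-extension handling done there.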
def _prepare_filename(self, info_dict, tmpl_type='default'):
try:
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id' or k.endswith('_id')))
outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
filename = self.evaluate_outtmpl(outtmpl, info_dict, sanitize)
force_ext = OUTTMPL_TYPES.get(tmpl_type)
if filename and force_ext is not None:
filename = replace_extension(filename, force_ext, info_dict.get('ext'))
# https://github.com/blackjack4494/youtube-dlc/issues/85
trim_file_name = self.params.get('trim_file_name', False)
if trim_file_name:
fn_groups = filename.rsplit('.')
ext = fn_groups[-1]
sub_ext = ''
if len(fn_groups) > 2:
sub_ext = fn_groups[-2]
filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
return filename
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
def prepare_filename(self, info_dict, dir_type='', warn=False):
"""Generate the output filename."""
filename = self._prepare_filename(info_dict, dir_type or 'default')
if not filename and dir_type not in ('', 'temp'):
return ''
if warn:
if not self.params.get('paths'):
pass
elif filename == '-':
self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
elif os.path.isabs(filename):
self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
if filename == '-' or not filename:
return filename
return self.get_output_path(dir_type, filename)
def _match_entry(self, info_dict, incomplete=False, silent=False):
""" Returns None if the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
def check_filter():
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
match_filter = self.params.get('match_filter')
if match_filter is not None:
try:
ret = match_filter(info_dict, incomplete=incomplete)
except TypeError:
# For backward compatibility
ret = None if incomplete else match_filter(info_dict)
if ret is not None:
return ret
return None
if self.in_download_archive(info_dict):
reason = '%s has already been recorded in the archive' % video_title
break_opt, break_err = 'break_on_existing', ExistingVideoReached
else:
reason = check_filter()
break_opt, break_err = 'break_on_reject', RejectedVideoReached
if reason is not None:
if not silent:
self.to_screen('[download] ' + reason)
if self.params.get(break_opt, False):
raise break_err()
return reason
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info=None,
process=True, force_generic_extractor=False):
"""
Return a list with a dictionary for each video extracted.
Arguments:
url -- URL to extract
Keyword arguments:
download -- whether to download videos during extraction
ie_key -- extractor key hint
extra_info -- dictionary containing the extra values to add to each result
process -- whether to resolve all unresolved references (URLs, playlist items),
must be True for download to work.
force_generic_extractor -- force using the generic extractor
"""
if extra_info is None:
extra_info = {}
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = {ie_key: self._get_info_extractor_class(ie_key)}
else:
ies = self._ies
for ie_key, ie in ies.items():
if not ie.suitable(url):
continue
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
temp_id = ie.get_temp_id(url)
if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
self.to_screen("[%s] %s: has already been recorded in archive" % (
ie_key, temp_id))
break
return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def __handle_extraction_exceptions(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
self.report_error(msg)
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
except ThrottledDownload:
self.to_stderr('\r')
self.report_warning('The download speed is below throttle limit. Re-extracting data')
return wrapper(self, *args, **kwargs)
except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached, LazyList.IndexError):
raise
except Exception as e:
if self.params.get('ignoreerrors'):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
else:
raise
return wrapper
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
return
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
if extra_info.get('original_url'):
ie_result.setdefault('original_url', extra_info['original_url'])
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
def add_default_extra_info(self, ie_result, ie, url):
if url is not None:
self.add_extra_info(ie_result, {
'webpage_url': url,
'original_url': url,
'webpage_url_basename': url_basename(url),
})
if ie is not None:
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info=None):
"""
Take the result of the ie (may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download' is True.
Returns the resolved ie_result.
"""
if extra_info is None:
extra_info = {}
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
if ie_result.get('original_url'):
extra_info.setdefault('original_url', ie_result['original_url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
or extract_flat is True):
info_copy = ie_result.copy()
ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
if ie and not ie_result.get('id'):
info_copy['id'] = ie.get_temp_id(ie_result['url'])
self.add_default_extra_info(info_copy, ie, ie_result['url'])
self.add_extra_info(info_copy, extra_info)
self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
if self.params.get('force_write_download_archive', False):
self.record_download_archive(info_copy)
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
ie_result = self.process_video_result(ie_result, download=download)
additional_urls = (ie_result or {}).get('additional_urls')
if additional_urls:
# TODO: Improve MetadataParserPP to allow setting a list
if isinstance(additional_urls, compat_str):
additional_urls = [additional_urls]
self.to_screen(
'[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
ie_result['additional_entries'] = [
self.extract_info(
url, download, extra_info,
force_generic_extractor=self.params.get('force_generic_extractor'))
for url in additional_urls
]
return ie_result
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(
ie_result['url'], download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
# Extracted info may not be a video result (i.e.
# info.get('_type', 'video') != video) but rather an url or
# url_transparent. In such cases outer metadata (from ie_result)
# should be propagated to inner one (info). For this to happen
# _type of info should be overridden with url_transparent. This
# fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
webpage_url = ie_result['webpage_url']
if webpage_url in self._playlist_urls:
self.to_screen(
'[download] Skipping already downloaded playlist: %s'
% (ie_result.get('title') or ie_result.get('id')))
return
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
self._sanitize_thumbnails(ie_result)
try:
return self.__process_playlist(ie_result, download)
finally:
self._playlist_level -= 1
if not self._playlist_level:
self._playlist_urls.clear()
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(r, {
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
})
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
def _ensure_dir_exists(self, path):
return make_dir(path, self.report_error)
def __process_playlist(self, ie_result, download):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
if 'entries' not in ie_result:
raise EntryNotInPlaylist()
incomplete_entries = bool(ie_result.get('requested_entries'))
if incomplete_entries:
def fill_missing_entries(entries, indexes):
ret = [None] * max(indexes)
for i, entry in zip(indexes, entries):
ret[i - 1] = entry
return ret
ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
playlist_results = []
playliststart = self.params.get('playliststart', 1)
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
ie_entries = ie_result['entries']
msg = (
'Downloading %d videos' if not isinstance(ie_entries, list)
else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
if isinstance(ie_entries, list):
def get_entry(i):
return ie_entries[i - 1]
else:
if not isinstance(ie_entries, PagedList):
ie_entries = LazyList(ie_entries)
def get_entry(i):
return YoutubeDL.__handle_extraction_exceptions(
lambda self, i: ie_entries[i - 1]
)(self, i)
entries = []
items = playlistitems if playlistitems is not None else itertools.count(playliststart)
for i in items:
if i == 0:
continue
if playlistitems is None and playlistend is not None and playlistend < i:
break
entry = None
try:
entry = get_entry(i)
if entry is None:
raise EntryNotInPlaylist()
except (IndexError, EntryNotInPlaylist):
if incomplete_entries:
raise EntryNotInPlaylist()
elif not playlistitems:
break
entries.append(entry)
try:
if entry is not None:
self._match_entry(entry, incomplete=True, silent=True)
except (ExistingVideoReached, RejectedVideoReached):
break
ie_result['entries'] = entries
# Save playlist_index before re-ordering
entries = [
((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
for i, entry in enumerate(entries, 1)
if entry is not None]
n_entries = len(entries)
if not playlistitems and (playliststart or playlistend):
playlistitems = list(range(playliststart, playliststart + n_entries))
ie_result['requested_entries'] = playlistitems
if self.params.get('allow_playlist_files', True):
ie_copy = {
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': 0,
}
ie_copy.update(dict(ie_result))
if self._write_info_json('playlist', ie_result,
self.prepare_filename(ie_copy, 'pl_infojson')) is None:
return
if self._write_description('playlist', ie_result,
self.prepare_filename(ie_copy, 'pl_description')) is None:
return
# TODO: This should be passed to ThumbnailsConvertor if necessary
self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
failures = 0
max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
for i, entry_tuple in enumerate(entries, 1):
playlist_index, entry = entry_tuple
if 'playlist-index' in self.params.get('compat_opts', []):
playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
'playlist_index': playlist_index,
'playlist_autonumber': i,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
if self._match_entry(entry, incomplete=True) is not None:
continue
entry_result = self.__process_iterable_entry(entry, download, extra)
if not entry_result:
failures += 1
if failures >= max_failures:
self.report_error(
'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
break
# TODO: skip failed (empty) entries?
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
@__handle_extraction_exceptions
def __process_iterable_entry(self, entry, download, extra_info):
return self.process_ie_result(
entry, download=download, extra_info=extra_info)
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.fullmatch(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-zA-Z0-9._-]+)\s*
(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[a-zA-Z0-9._-]+)\s*
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.fullmatch(filter_spec)
if m:
comparison_value = m.group('value')
str_op = STR_OPERATORS[m.group('op')]
if m.group('negation'):
op = lambda attr, value: not str_op(attr, value)
else:
op = str_op
if not m:
raise SyntaxError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
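# Illustrative filter_spec values accepted by the regexes above: numeric
# comparisons such as 'height<=720' or 'filesize>100M', and string comparisons
# such as 'ext=mp4' or 'format_id!*=dash'. A '?' after the operator
# (eg 'height<=?720') additionally keeps formats where the field is unknown.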
def _default_format_spec(self, info_dict, download=True):
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
prefer_best = (
not self.params.get('simulate')
and download
and (
not can_merge()
or info_dict.get('is_live', False)
or self.outtmpl_dict['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
or 'format-spec' in self.params.get('compat_opts', []))
return (
'best/bestvideo+bestaudio' if prefer_best
else 'bestvideo*+bestaudio/best' if not compat
else 'bestvideo+bestaudio/best')
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
'video': self.params.get('allow_multiple_video_streams', False)}
check_formats = self.params.get('check_formats')
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
if not current_selector:
raise syntax_error('Unexpected "+"', start)
selector_1 = current_selector
selector_2 = _parse_format_selection(tokens, inside_merge=True)
if not selector_2:
raise syntax_error('Expected a selector', start)
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _merge(formats_pair):
format_1, format_2 = formats_pair
formats_info = []
formats_info.extend(format_1.get('requested_formats', (format_1,)))
formats_info.extend(format_2.get('requested_formats', (format_2,)))
if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
get_no_more = {'video': False, 'audio': False}
for (i, fmt_info) in enumerate(formats_info):
if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
formats_info.pop(i)
continue
for aud_vid in ['audio', 'video']:
if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
if get_no_more[aud_vid]:
formats_info.pop(i)
break
get_no_more[aud_vid] = True
if len(formats_info) == 1:
return formats_info[0]
video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
output_ext = self.params.get('merge_output_format')
if not output_ext:
if the_only_video:
output_ext = the_only_video['ext']
elif the_only_audio and not video_fmts:
output_ext = the_only_audio['ext']
else:
output_ext = 'mkv'
filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
new_dict = {
'requested_formats': formats_info,
'format': '+'.join(filtered('format')),
'format_id': '+'.join(filtered('format_id')),
'ext': output_ext,
'protocol': '+'.join(map(determine_protocol, formats_info)),
'language': '+'.join(orderedSet(filtered('language'))),
'format_note': '+'.join(orderedSet(filtered('format_note'))),
'filesize_approx': sum(filtered('filesize', 'filesize_approx')),
'tbr': sum(filtered('tbr', 'vbr', 'abr')),
}
if the_only_video:
new_dict.update({
'width': the_only_video.get('width'),
'height': the_only_video.get('height'),
'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
'fps': the_only_video.get('fps'),
'vcodec': the_only_video.get('vcodec'),
'vbr': the_only_video.get('vbr'),
'stretched_ratio': the_only_video.get('stretched_ratio'),
})
if the_only_audio:
new_dict.update({
'acodec': the_only_audio.get('acodec'),
'abr': the_only_audio.get('abr'),
'asr': the_only_audio.get('asr'),
})
return new_dict
def _check_formats(formats):
if not check_formats:
yield from formats
return
for f in formats:
self.to_screen('[info] Testing format %s' % f['format_id'])
temp_file = tempfile.NamedTemporaryFile(
suffix='.tmp', delete=False,
dir=self.get_output_path('temp') or None)
temp_file.close()
try:
success, _ = self.dl(temp_file.name, f, test=True)
except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
success = False
finally:
if os.path.exists(temp_file.name):
try:
os.remove(temp_file.name)
except OSError:
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
if success:
yield f
else:
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
def _build_selector_function(selector):
if isinstance(selector, list): # ,
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
yield from f(ctx)
return selector_function
elif selector.type == GROUP: # ()
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST: # /
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == MERGE: # +
selector_1, selector_2 = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
yield _merge(pair)
elif selector.type == SINGLE: # atom
format_spec = selector.selector or 'best'
# TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
if format_spec == 'all':
def selector_function(ctx):
yield from _check_formats(ctx['formats'])
elif format_spec == 'mergeall':
def selector_function(ctx):
formats = list(_check_formats(ctx['formats']))
if not formats:
return
merged_format = formats[-1]
for f in formats[-2::-1]:
merged_format = _merge((merged_format, f))
yield merged_format
else:
format_fallback, format_reverse, format_idx = False, True, 1
mobj = re.match(
r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
format_spec)
if mobj is not None:
format_idx = int_or_none(mobj.group('n'), default=1)
format_reverse = mobj.group('bw')[0] == 'b'
format_type = (mobj.group('type') or [None])[0]
not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
format_modified = mobj.group('mod') is not None
format_fallback = not format_type and not format_modified # for b, w
_filter_f = (
(lambda f: f.get('%scodec' % format_type) != 'none')
if format_type and format_modified # bv*, ba*, wv*, wa*
else (lambda f: f.get('%scodec' % not_format_type) == 'none')
if format_type # bv, ba, wv, wa
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
if not format_modified # b, w
else lambda f: True) # b*, w*
filter_f = lambda f: _filter_f(f) and (
f.get('vcodec') != 'none' or f.get('acodec') != 'none')
else:
if format_spec in self._format_selection_exts['audio']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
elif format_spec in self._format_selection_exts['video']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
elif format_spec in self._format_selection_exts['storyboards']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
else:
filter_f = lambda f: f.get('format_id') == format_spec # id
def selector_function(ctx):
formats = list(ctx['formats'])
matches = list(filter(filter_f, formats)) if filter_f is not None else formats
if format_fallback and ctx['incomplete_formats'] and not matches:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)), best/worst will fall back to the
                            # best/worst {video,audio}-only format
matches = formats
matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
try:
yield matches[format_idx - 1]
except IndexError:
return
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
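        # Small token-stream wrapper with one-step rewind (restore_last_token), which
        # _parse_format_selection uses for lookahead while parsing the format spec.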
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
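    # Build the effective HTTP headers for a format: the global std_headers overridden by
    # the format's own http_headers, plus the matching cookies and X-Forwarded-For IP.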
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def _sanitize_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '',
t.get('url')))
def thumbnail_tester():
def test_thumbnail(t):
self.to_screen(f'[info] Testing thumbnail {t["id"]}')
try:
self.urlopen(HEADRequest(t['url']))
except network_exceptions as err:
self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
return False
return True
return test_thumbnail
for i, t in enumerate(thumbnails):
if t.get('id') is None:
t['id'] = '%d' % i
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
t['url'] = sanitize_url(t['url'])
if self.params.get('check_formats'):
info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
else:
info_dict['thumbnails'] = thumbnails
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result',
video_id=info_dict['id'], ie=info_dict['extractor'])
def report_force_conversion(field, field_not, conversion):
self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
% (field, field_not, conversion))
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
field = info.get(numeric_field)
if field is None or isinstance(field, compat_numeric_types):
continue
report_force_conversion(numeric_field, 'numeric', 'int')
info[numeric_field] = int_or_none(field)
sanitize_string_field(info_dict, 'id')
sanitize_numeric_fields(info_dict)
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
self._sanitize_thumbnails(info_dict)
thumbnail = info_dict.get('thumbnail')
thumbnails = info_dict.get('thumbnails')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if info_dict.get('display_id') is None and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
if info_dict.get('duration') is not None:
info_dict['duration_string'] = formatSeconds(info_dict['duration'])
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
live_keys = ('is_live', 'was_live')
live_status = info_dict.get('live_status')
if live_status is None:
for key in live_keys:
if info_dict.get(key) is False:
continue
if info_dict.get(key):
live_status = key
break
if all(info_dict.get(key) is False for key in live_keys):
live_status = 'not_live'
if live_status:
info_dict['live_status'] = live_status
for key in live_keys:
if info_dict.get(key) is None:
info_dict[key] = (live_status == key)
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
if cc:
for _, subtitle in cc.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
automatic_captions = info_dict.get('automatic_captions')
subtitles = info_dict.get('subtitles')
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles, automatic_captions)
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
if not self.params.get('allow_unplayable_formats'):
formats = [f for f in formats if not f.get('has_drm')]
if not formats:
self.raise_no_formats(info_dict)
def is_wellformed(f):
url = f.get('url')
if not url:
self.report_warning(
'"url" field is missing or empty - skipping format, '
'there is an error in extractor')
return False
if isinstance(url, bytes):
sanitize_string_field(f, 'url')
return True
# Filter out malformed formats for better extraction robustness
formats = list(filter(is_wellformed, formats))
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
sanitize_string_field(format, 'format_id')
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
common_exts = set(itertools.chain(*self._format_selection_exts.values()))
for format_id, ambiguous_formats in formats_dict.items():
            ambiguous_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambiguous_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Ensure there is no conflict between id and ext in format selection
# See https://github.com/yt-dlp/yt-dlp/issues/1282
if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
format['format_id'] = 'f%s' % format['format_id']
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=format_field(format, 'format_note', ' (%s)'),
)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
if format.get('resolution') is None:
format['resolution'] = self.format_resolution(format, default=None)
if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
format['dynamic_range'] = 'SDR'
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if not formats or formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them;
            # otherwise we end up with a circular reference: the first (and only)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
info_dict['formats'] = formats
info_dict, _ = self.pre_process(info_dict)
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
if self.params.get('listformats'):
if not info_dict.get('formats') and not info_dict.get('url'):
self.to_screen('%s has no formats' % info_dict['id'])
else:
self.list_formats(info_dict)
if self.params.get('listsubtitles'):
if 'automatic_captions' in info_dict:
self.list_subtitles(
info_dict['id'], automatic_captions, 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
list_only = self.params.get('simulate') is None and (
self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
if list_only:
# Without this printing, -F --print-json will not work
self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
return
format_selector = self.format_selector
if format_selector is None:
req_format = self._default_format_spec(info_dict, download=download)
self.write_debug('Default format spec: %s' % req_format)
format_selector = self.build_format_selector(req_format)
# While in format selection we may need to have an access to the original
# format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether original formats provided
# by extractor are incomplete or not (i.e. whether extractor provides only
# video-only or audio-only formats) for proper formats selection for
# extractors with such incomplete formats (see
# https://github.com/ytdl-org/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes incorrect format selection issue (see
# https://github.com/ytdl-org/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
# all formats are audio-only
or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
if not self.params.get('ignore_no_formats_error'):
raise ExtractorError('Requested format is not available', expected=True,
video_id=info_dict['id'], ie=info_dict['extractor'])
else:
self.report_warning('Requested format is not available')
# Process what we can, even without any available formats.
self.process_info(dict(info_dict))
elif download:
self.to_screen(
'[info] %s: Downloading %d format(s): %s' % (
info_dict['id'], len(formats_to_download),
", ".join([f['format_id'] for f in formats_to_download])))
for fmt in formats_to_download:
new_info = dict(info_dict)
# Save a reference to the original info_dict so that it can be modified in process_info if needed
new_info['__original_infodict'] = info_dict
new_info.update(fmt)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
if formats_to_download:
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
all_sub_langs = available_subs.keys()
if self.params.get('allsubtitles', False):
requested_langs = all_sub_langs
elif self.params.get('subtitleslangs', False):
# A list is used so that the order of languages will be the same as
# given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
requested_langs = []
for lang_re in self.params.get('subtitleslangs'):
if lang_re == 'all':
requested_langs.extend(all_sub_langs)
continue
discard = lang_re[0] == '-'
if discard:
lang_re = lang_re[1:]
current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
if discard:
for lang in current_langs:
while lang in requested_langs:
requested_langs.remove(lang)
else:
requested_langs.extend(current_langs)
requested_langs = orderedSet(requested_langs)
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(all_sub_langs)[0]]
if requested_langs:
self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
def __forced_printings(self, info_dict, filename, incomplete):
def print_mandatory(field, actual_field=None):
if actual_field is None:
actual_field = field
if (self.params.get('force%s' % field, False)
and (not incomplete or info_dict.get(actual_field) is not None)):
self.to_stdout(info_dict[actual_field])
def print_optional(field):
if (self.params.get('force%s' % field, False)
and info_dict.get(field) is not None):
self.to_stdout(info_dict[field])
info_dict = info_dict.copy()
if filename is not None:
info_dict['filename'] = filename
if info_dict.get('requested_formats') is not None:
# For RTMP URLs, also include the playpath
info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
elif 'url' in info_dict:
info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
if self.params.get('forceprint') or self.params.get('forcejson'):
self.post_extract(info_dict)
for tmpl in self.params.get('forceprint', []):
mobj = re.match(r'\w+(=?)$', tmpl)
if mobj and mobj.group(1):
tmpl = f'{tmpl[:-1]} = %({tmpl[:-1]})s'
elif mobj:
tmpl = '%({})s'.format(tmpl)
self.to_stdout(self.evaluate_outtmpl(tmpl, info_dict))
print_mandatory('title')
print_mandatory('id')
print_mandatory('url', 'urls')
print_optional('thumbnail')
print_optional('description')
print_optional('filename')
if self.params.get('forceduration') and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
print_mandatory('format')
if self.params.get('forcejson'):
self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
def dl(self, name, info, subtitle=False, test=False):
if not info.get('url'):
self.raise_no_formats(info, True)
if test:
verbose = self.params.get('verbose')
params = {
'test': True,
'quiet': self.params.get('quiet') or not verbose,
'verbose': verbose,
'noprogress': not verbose,
'nopart': True,
'skip_unavailable_fragments': False,
'keep_fragments': False,
'overwrites': True,
'_no_ytdl_file': True,
}
else:
params = self.params
fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
if not test:
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
self.write_debug('Invoking downloader on "%s"' % urls)
new_info = copy.deepcopy(self._copy_infodict(info))
if new_info.get('http_headers') is None:
new_info['http_headers'] = self._calc_headers(new_info)
return fd.download(name, new_info, subtitle)
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
# TODO: backward compatibility, to be removed
info_dict['fulltitle'] = info_dict['title']
if 'format' not in info_dict and 'ext' in info_dict:
info_dict['format'] = info_dict['ext']
if self._match_entry(info_dict) is not None:
return
self.post_extract(info_dict)
self._num_downloads += 1
# info_dict['_filename'] needs to be set for backward compatibility
info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
temp_filename = self.prepare_filename(info_dict, 'temp')
files_to_move = {}
# Forced printings
self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
if self.params.get('simulate'):
if self.params.get('force_write_download_archive', False):
self.record_download_archive(info_dict)
# Do nothing else if in simulate mode
return
if full_filename is None:
return
if not self._ensure_dir_exists(encodeFilename(full_filename)):
return
if not self._ensure_dir_exists(encodeFilename(temp_filename)):
return
if self._write_description('video', info_dict,
self.prepare_filename(info_dict, 'description')) is None:
return
sub_files = self._write_subtitles(info_dict, temp_filename)
if sub_files is None:
return
files_to_move.update(dict(sub_files))
thumb_files = self._write_thumbnails(
'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
if thumb_files is None:
return
files_to_move.update(dict(thumb_files))
infofn = self.prepare_filename(info_dict, 'infojson')
_infojson_written = self._write_info_json('video', info_dict, infofn)
if _infojson_written:
info_dict['__infojson_filename'] = infofn
elif _infojson_written is None:
return
# Note: Annotations are deprecated
annofn = None
if self.params.get('writeannotations', False):
annofn = self.prepare_filename(info_dict, 'annotation')
if annofn:
if not self._ensure_dir_exists(encodeFilename(annofn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
elif not info_dict.get('annotations'):
self.report_warning('There are no annotations to write.')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
# Write internet shortcut files
url_link = webloc_link = desktop_link = False
if self.params.get('writelink', False):
if sys.platform == "darwin": # macOS.
webloc_link = True
elif sys.platform.startswith("linux"):
desktop_link = True
else: # if sys.platform in ['win32', 'cygwin']:
url_link = True
if self.params.get('writeurllink', False):
url_link = True
if self.params.get('writewebloclink', False):
webloc_link = True
if self.params.get('writedesktoplink', False):
desktop_link = True
if url_link or webloc_link or desktop_link:
if 'webpage_url' not in info_dict:
self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
return
ascii_url = iri_to_uri(info_dict['webpage_url'])
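            # Helper that writes one internet-shortcut file (.url/.webloc/.desktop)
            # pointing at the webpage URL.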
def _write_link_file(extension, template, newline, embed_filename):
linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
self.to_screen('[info] Internet shortcut is already present')
else:
try:
self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
template_vars = {'url': ascii_url}
if embed_filename:
template_vars['filename'] = linkfn[:-(len(extension) + 1)]
linkfile.write(template % template_vars)
except (OSError, IOError):
self.report_error('Cannot write internet shortcut ' + linkfn)
return False
return True
if url_link:
if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
return
if webloc_link:
if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
return
if desktop_link:
if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
return
try:
info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
except PostProcessingError as err:
self.report_error('Preprocessing: %s' % str(err))
return
must_record_download_archive = False
if self.params.get('skip_download', False):
info_dict['filepath'] = temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
info_dict['__files_to_move'] = files_to_move
info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
else:
# Download
info_dict.setdefault('__postprocessors', [])
try:
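                # Reuse an already downloaded (or already converted) file if present;
                # with overwrites enabled, existing copies are deleted and None is
                # returned so a fresh download takes place.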
def existing_file(*filepaths):
ext = info_dict.get('ext')
final_ext = self.params.get('final_ext', ext)
existing_files = []
for file in orderedSet(filepaths):
if final_ext != ext:
converted = replace_extension(file, final_ext, ext)
if os.path.exists(encodeFilename(converted)):
existing_files.append(converted)
if os.path.exists(encodeFilename(file)):
existing_files.append(file)
if not existing_files or self.params.get('overwrites', False):
for file in orderedSet(existing_files):
self.report_file_delete(file)
os.remove(encodeFilename(file))
return None
info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
return existing_files[0]
success = True
if info_dict.get('requested_formats') is not None:
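                    # Extension-based heuristic for whether the selected video and audio
                    # streams can share a container without remuxing to mkv.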
def compatible_formats(formats):
# TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
video_formats = [format for format in formats if format.get('vcodec') != 'none']
audio_formats = [format for format in formats if format.get('acodec') != 'none']
if len(video_formats) > 2 or len(audio_formats) > 2:
return False
# Check extension
exts = set(format.get('ext') for format in formats)
COMPATIBLE_EXTS = (
set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
set(('webm',)),
)
for ext_sets in COMPATIBLE_EXTS:
if ext_sets.issuperset(exts):
return True
# TODO: Check acodec/vcodec
return False
requested_formats = info_dict['requested_formats']
old_ext = info_dict['ext']
if self.params.get('merge_output_format') is None:
if not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv')
if (info_dict['ext'] == 'webm'
and info_dict.get('thumbnails')
# check with type instead of pp_key, __name__, or isinstance
                            # since we don't want any custom PPs to trigger this
and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):
info_dict['ext'] = 'mkv'
self.report_warning(
'webm doesn\'t support embedding a thumbnail, mkv will be used')
new_ext = info_dict['ext']
def correct_ext(filename, ext=new_ext):
if filename == '-':
return filename
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext in (old_ext, new_ext)
else filename)
return '%s.%s' % (filename_wo_ext, ext)
# Ensure filename always has a correct extension for successful merge
full_filename = correct_ext(full_filename)
temp_filename = correct_ext(temp_filename)
dl_filename = existing_file(full_filename, temp_filename)
info_dict['__real_download'] = False
if dl_filename is not None:
self.report_file_already_downloaded(dl_filename)
elif get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-'):
info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
downloaded = []
merger = FFmpegMergerPP(self)
if self.params.get('allow_unplayable_formats'):
self.report_warning(
'You have requested merging of multiple formats '
'while also allowing unplayable formats to be downloaded. '
'The formats won\'t be merged to prevent data corruption.')
elif not merger.available:
self.report_warning(
'You have requested merging of multiple formats but ffmpeg is not installed. '
'The formats won\'t be merged.')
if temp_filename == '-':
reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict)
else 'but the formats are incompatible for simultaneous download' if merger.available
else 'but ffmpeg is not installed')
self.report_warning(
f'You have requested downloading multiple formats to stdout {reason}. '
'The formats will be streamed one after the other')
fname = temp_filename
for f in requested_formats:
new_info = dict(info_dict)
del new_info['requested_formats']
new_info.update(f)
if temp_filename != '-':
fname = prepend_extension(
correct_ext(temp_filename, new_info['ext']),
'f%s' % f['format_id'], new_info['ext'])
if not self._ensure_dir_exists(fname):
return
f['filepath'] = fname
downloaded.append(fname)
partial_success, real_download = self.dl(fname, new_info)
info_dict['__real_download'] = info_dict['__real_download'] or real_download
success = success and partial_success
if merger.available and not self.params.get('allow_unplayable_formats'):
info_dict['__postprocessors'].append(merger)
info_dict['__files_to_merge'] = downloaded
# Even if there were no downloads, it is being merged only now
info_dict['__real_download'] = True
else:
for file in downloaded:
files_to_move[file] = None
else:
# Just a single file
dl_filename = existing_file(full_filename, temp_filename)
if dl_filename is None or dl_filename == temp_filename:
# dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
# So we should try to resume the download
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
self.report_file_already_downloaded(dl_filename)
dl_filename = dl_filename or temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and full_filename != '-':
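                # Queue ffmpeg-based fixups (stretched aspect ratio, DASH m4a container,
                # malformed AAC bitstream, bad timestamps/duration) according to the
                # 'fixup' policy.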
def fixup():
do_fixup = True
fixup_policy = self.params.get('fixup')
vid = info_dict['id']
if fixup_policy in ('ignore', 'never'):
return
elif fixup_policy == 'warn':
do_fixup = False
elif fixup_policy != 'force':
assert fixup_policy in ('detect_or_warn', None)
if not info_dict.get('__real_download'):
do_fixup = False
def ffmpeg_fixup(cndn, msg, cls):
if not cndn:
return
if not do_fixup:
self.report_warning(f'{vid}: {msg}')
return
pp = cls(self)
if pp.available:
info_dict['__postprocessors'].append(pp)
else:
self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
stretched_ratio = info_dict.get('stretched_ratio')
ffmpeg_fixup(
stretched_ratio not in (1, None),
f'Non-uniform pixel ratio {stretched_ratio}',
FFmpegFixupStretchedPP)
ffmpeg_fixup(
(info_dict.get('requested_formats') is None
and info_dict.get('container') == 'm4a_dash'
and info_dict.get('ext') == 'm4a'),
'writing DASH m4a. Only some players support this container',
FFmpegFixupM4aPP)
downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
downloader = downloader.__name__ if downloader else None
ffmpeg_fixup(info_dict.get('requested_formats') is None and downloader == 'HlsFD',
'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
fixup()
try:
info_dict = self.post_process(dl_filename, info_dict, files_to_move)
except PostProcessingError as err:
self.report_error('Postprocessing: %s' % str(err))
return
try:
for ph in self._post_hooks:
ph(info_dict['filepath'])
except Exception as err:
self.report_error('post hooks: %s' % str(err))
return
must_record_download_archive = True
if must_record_download_archive or self.params.get('force_write_download_archive', False):
self.record_download_archive(info_dict)
max_downloads = self.params.get('max_downloads')
if max_downloads is not None and self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.outtmpl_dict['default']
if (len(url_list) > 1
and outtmpl != '-'
and '%' not in outtmpl
and self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloads reached')
raise
except ExistingVideoReached:
self.to_screen('[info] Encountered a video that is already in the archive, stopping due to --break-on-existing')
raise
except RejectedVideoReached:
self.to_screen('[info] Encountered a video that did not match filter, stopping due to --break-on-reject')
raise
else:
if self.params.get('dump_single_json', False):
self.post_extract(res)
self.to_stdout(json.dumps(self.sanitize_info(res)))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, we can't call json.load
info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
try:
self.process_ie_result(info, download=True)
except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def sanitize_info(info_dict, remove_private_keys=False):
''' Sanitize the infodict for converting to json '''
if info_dict is None:
return info_dict
info_dict.setdefault('epoch', int(time.time()))
remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
        keep_keys = ['_type']  # Always keep this to facilitate load-info-json
if remove_private_keys:
remove_keys |= {
'requested_formats', 'requested_subtitles', 'requested_entries',
'filepath', 'entries', 'original_url', 'playlist_autonumber',
}
empty_values = (None, {}, [], set(), tuple())
reject = lambda k, v: k not in keep_keys and (
k.startswith('_') or k in remove_keys or v in empty_values)
else:
reject = lambda k, v: k in remove_keys
filter_fn = lambda obj: (
list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
else obj if not isinstance(obj, dict)
else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
return filter_fn(info_dict)
@staticmethod
def filter_requested_info(info_dict, actually_filter=True):
''' Alias of sanitize_info for backward compatibility '''
return YoutubeDL.sanitize_info(info_dict, actually_filter)
def run_pp(self, pp, infodict):
files_to_delete = []
if '__files_to_move' not in infodict:
infodict['__files_to_move'] = {}
try:
files_to_delete, infodict = pp.run(infodict)
except PostProcessingError as e:
# Must be True and not 'only_download'
if self.params.get('ignoreerrors') is True:
self.report_error(e)
return infodict
raise
if not files_to_delete:
return infodict
if self.params.get('keepvideo', False):
for f in files_to_delete:
infodict['__files_to_move'].setdefault(f, '')
else:
for old_filename in set(files_to_delete):
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
if old_filename in infodict['__files_to_move']:
del infodict['__files_to_move'][old_filename]
return infodict
@staticmethod
def post_extract(info_dict):
def actual_post_extract(info_dict):
if info_dict.get('_type') in ('playlist', 'multi_video'):
for video_dict in info_dict.get('entries', {}):
actual_post_extract(video_dict or {})
return
post_extractor = info_dict.get('__post_extractor') or (lambda: {})
extra = post_extractor().items()
info_dict.update(extra)
info_dict.pop('__post_extractor', None)
original_infodict = info_dict.get('__original_infodict') or {}
original_infodict.update(extra)
original_infodict.pop('__post_extractor', None)
actual_post_extract(info_dict or {})
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
info = dict(ie_info)
info['__files_to_move'] = files_to_move or {}
for pp in self._pps[key]:
info = self.run_pp(pp, info)
return info, info.pop('__files_to_move', None)
def post_process(self, filename, ie_info, files_to_move=None):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
info['__files_to_move'] = files_to_move or {}
for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
info = self.run_pp(pp, info)
info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
del info['__files_to_move']
for pp in self._pps['after_move']:
info = self.run_pp(pp, info)
return info
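    # Archive entries have the form "<extractor key, lowercased> <video id>"; when no
    # extractor key is available, it is guessed from the URL.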
def _make_archive_id(self, info_dict):
video_id = info_dict.get('id')
if not video_id:
return
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
if extractor is None:
url = str_or_none(info_dict.get('url'))
if not url:
return
# Try to find matching extractor for the URL and take its ie_key
for ie_key, ie in self._ies.items():
if ie.suitable(url):
extractor = ie_key
break
else:
return
return '%s %s' % (extractor.lower(), video_id)
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if not vid_id:
return False # Incomplete video information
return vid_id in self.archive
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
self.archive.add(vid_id)
@staticmethod
def format_resolution(format, default='unknown'):
is_images = format.get('vcodec') == 'none' and format.get('acodec') == 'none'
if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('width') and format.get('height'):
res = '%dx%d' % (format['width'], format['height'])
elif format.get('height'):
res = '%sp' % format['height']
elif format.get('width'):
res = '%dx?' % format['width']
elif is_images:
return 'images'
else:
return default
return f'{res} images' if is_images else res
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
new_format = (
'list-formats' not in self.params.get('compat_opts', [])
and self.params.get('listformats_table', True) is not False)
if new_format:
table = [
[
format_field(f, 'format_id'),
format_field(f, 'ext'),
self.format_resolution(f),
format_field(f, 'fps', '%d'),
format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
'|',
format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
format_field(f, 'tbr', '%4dk'),
shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
'|',
format_field(f, 'vcodec', default='unknown').replace('none', ''),
format_field(f, 'vbr', '%4dk'),
format_field(f, 'acodec', default='unknown').replace('none', ''),
format_field(f, 'abr', '%3dk'),
format_field(f, 'asr', '%5dHz'),
', '.join(filter(None, (
'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
format_field(f, 'language', '[%s]'),
format_field(f, 'format_note'),
format_field(f, 'container', ignore=(None, f.get('ext'))),
))),
] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', 'HDR', '|', ' FILESIZE', ' TBR', 'PROTO',
'|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
else:
table = [
[
format_field(f, 'format_id'),
format_field(f, 'ext'),
self.format_resolution(f),
self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:' % info_dict['id'])
self.to_stdout(render_table(
header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
def list_thumbnails(self, info_dict):
thumbnails = list(info_dict.get('thumbnails'))
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_stdout(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
def _row(lang, formats):
exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
if len(set(names)) == 1:
names = [] if names[0] == 'unknown' else names[:1]
return [lang, ', '.join(names), ', '.join(exts)]
self.to_stdout(render_table(
['Language', 'Name', 'Formats'],
[_row(lang, formats) for lang, formats in subtitles.items()],
hideEmpty=True))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
get_encoding = lambda stream: getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, stdout %s, stderr %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
get_encoding(self._screen_file), get_encoding(self._err_file),
self.get_encoding()))
logger = self.params.get('logger')
if logger:
write_debug = lambda msg: logger.debug(f'[debug] {msg}')
write_debug(encoding_str)
else:
write_debug = lambda msg: self._write_string(f'[debug] {msg}')
write_string(encoding_str, encoding=None)
source = detect_variant()
write_debug('yt-dlp version %s%s\n' % (__version__, '' if source == 'unknown' else f' ({source})'))
if _LAZY_LOADER:
write_debug('Lazy loading extractors enabled\n')
if plugin_extractors or plugin_postprocessors:
write_debug('Plugins: %s\n' % [
'%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
if self.params.get('compat_opts'):
write_debug('Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
try:
sp = Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate_or_kill()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
write_debug('Git HEAD: %s\n' % out)
except Exception:
try:
sys.exc_clear()
except Exception:
pass
def python_implementation():
impl_name = platform.python_implementation()
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name
write_debug('Python version %s (%s %s) - %s\n' % (
platform.python_version(),
python_implementation(),
platform.architecture()[0],
platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join(
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
) or 'none'
write_debug('exe versions: %s\n' % exe_str)
from .downloader.websocket import has_websockets
from .postprocessor.embedthumbnail import has_mutagen
from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
lib_str = ', '.join(sorted(filter(None, (
compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0],
has_websockets and 'websockets',
has_mutagen and 'mutagen',
SQLITE_AVAILABLE and 'sqlite',
KEYRING_AVAILABLE and 'keyring',
)))) or 'none'
write_debug('Optional libraries: %s\n' % lib_str)
write_debug('ANSI escape support: stdout = %s, stderr = %s\n' % (
supports_terminal_sequences(self._screen_file),
supports_terminal_sequences(self._err_file)))
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
write_debug('Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
write_debug('Public IP address: %s\n' % ipaddr)
return
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
redirect_handler = YoutubeDLRedirectHandler()
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/ytdl-org/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_info_json(self, label, ie_result, infofn):
        ''' Write infojson and return True = written, False = skip, None = error '''
if not self.params.get('writeinfojson'):
return False
elif not infofn:
self.write_debug(f'Skipping writing {label} infojson')
return False
elif not self._ensure_dir_exists(infofn):
return None
elif not self.params.get('overwrites', True) and os.path.exists(infofn):
self.to_screen(f'[info] {label.title()} metadata is already present')
else:
self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
try:
write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
except (OSError, IOError):
self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
return None
return True
def _write_description(self, label, ie_result, descfn):
        ''' Write description and return True = written, False = skip, None = error '''
if not self.params.get('writedescription'):
return False
elif not descfn:
self.write_debug(f'Skipping writing {label} description')
return False
elif not self._ensure_dir_exists(descfn):
return None
elif not self.params.get('overwrites', True) and os.path.exists(descfn):
self.to_screen(f'[info] {label.title()} description is already present')
elif ie_result.get('description') is None:
self.report_warning(f'There\'s no {label} description to write')
return False
else:
try:
self.to_screen(f'[info] Writing {label} description to: {descfn}')
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(ie_result['description'])
except (OSError, IOError):
self.report_error(f'Cannot write {label} description file {descfn}')
return None
return True
def _write_subtitles(self, info_dict, filename):
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
ret = []
subtitles = info_dict.get('requested_subtitles')
if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitle download errors are already handled by the relevant IE,
            # so this will silently continue when used with an IE that doesn't support subtitles
return ret
sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
if not sub_filename_base:
self.to_screen('[info] Skipping writing video subtitles')
return ret
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
if not self.params.get('overwrites', True) and os.path.exists(sub_filename):
self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
continue
self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
continue
except (OSError, IOError):
self.report_error(f'Cannot write video subtitles file {sub_filename}')
return None
try:
sub_copy = sub_info.copy()
sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
self.dl(sub_filename, sub_copy, subtitle=True)
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}')
continue
return ret
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
write_all = self.params.get('write_all_thumbnails', False)
thumbnails, ret = [], []
if write_all or self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails') or []
multiple = write_all and len(thumbnails) > 1
if thumb_filename_base is None:
thumb_filename_base = filename
if thumbnails and not thumb_filename_base:
self.write_debug(f'Skipping writing {label} thumbnail')
return ret
for t in thumbnails[::-1]:
thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
thumb_display_id = f'{label} thumbnail' + (f' {t["id"]}' if multiple else '')
thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
if not self.params.get('overwrites', True) and os.path.exists(thumb_filename):
ret.append((thumb_filename, thumb_filename_final))
t['filepath'] = thumb_filename
self.to_screen(f'[info] {thumb_display_id.title()} is already present')
else:
self.to_screen(f'[info] Downloading {thumb_display_id} ...')
try:
uf = self.urlopen(t['url'])
self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
ret.append((thumb_filename, thumb_filename_final))
t['filepath'] = thumb_filename
except network_exceptions as err:
self.report_warning(f'Unable to download {thumb_display_id}: {err}')
if ret and not write_all:
break
return ret
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
wazimap_np/settings.py
|
import os
import dj_database_url
# pull in the default wazimap settings
from wazimap.settings import *  # noqa
# install this app before Wazimap
INSTALLED_APPS = ['wazimap_np'] + INSTALLED_APPS
DEBUG = os.environ.get('APP_ENV', 'dev') != 'prod'
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://wazimap_np:wazimap_np@localhost/wazimap_np')
DATABASES['default'] = dj_database_url.parse(DATABASE_URL)
DATABASES['default']['ATOMIC_REQUESTS'] = True
SCHEME = 'http' if (os.environ.get('APP_ENV', 'dev') == 'dev') else 'https'
URL = SCHEME+'://'+'nepalmap.org'
# Localise this instance of Wazimap
WAZIMAP['name'] = 'NepalMap'
# NB: this must be https if your site supports HTTPS.
WAZIMAP['url'] = URL
WAZIMAP['country_code'] = 'NP'
WAZIMAP['profile_builder'] = 'wazimap_np.profiles.get_census_profile'
WAZIMAP['levels'] = {
'country': {
'plural': 'countries',
'children': ['province']
},
'province': { # 7 provinces
'plural': 'provinces',
'children': ['district']
},
'district': { # 77 districts
'plural': 'districts',
'children': ['local']
},
# 6 Metropolitan Cities (Mahanagarpalika)
# 11 Sub-Metropolitan Cities (Upa-Mahanagarpalika)
# 276 Municipalities (Nagarpalika)
# 460 Rural Municipalities (Gaunpalika)
'local': { # 753 local bodies
'plural': 'locals',
'children': []
}
}
WAZIMAP['comparative_levels'] = ['country', 'province', 'district', 'local']
WAZIMAP['geometry_data'] = {
'country': 'geo/country.topojson',
'province': 'geo/province.topojson',
'district': 'geo/district.topojson',
'local': 'geo/local.topojson'
}
WAZIMAP['ga_tracking_id'] = os.environ.get('GA_TRACKING_ID')
WAZIMAP['twitter'] = '@codefornepal'
WAZIMAP['map_centre'] = [28.229651, 83.8165328]
WAZIMAP['map_zoom'] = 7
# Custom Settings
WAZIMAP['email'] = '[email protected]'
WAZIMAP['github'] = 'https://github.com/Code4Nepal/nepalmap_federal'
WAZIMAP['tagline'] = 'Explore and understand Nepal using data'
WAZIMAP['facebook'] = 'codefornepal'
WAZIMAP['twittercard'] = True
|
[] |
[] |
[
"APP_ENV",
"DATABASE_URL",
"GA_TRACKING_ID"
] |
[]
|
["APP_ENV", "DATABASE_URL", "GA_TRACKING_ID"]
|
python
| 3 | 0 | |
web/clui/model/user_test.go
|
/*
Copyright <holder> All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package model
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
build.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
Controls the building of the C extension model.
"""
import os
import sysconfig
from ast import literal_eval
from distutils.command.build_ext import build_ext # type: ignore
from distutils.core import Extension
try:
from dotenv import load_dotenv # type: ignore
except ImportError:
pass
else:
load_dotenv()
# Should we turn on debugging?
DEBUG = literal_eval(os.environ.get("FST_LOOKUP_DEBUG", "False"))
SHOULD_BUILD_EXTENSION = literal_eval(os.environ.get("FST_LOOKUP_BUILD_EXT", "True"))
# Get compiler flags from the current Python version:
extra_compile_args = (sysconfig.get_config_var("CFLAGS") or "").split()
extra_compile_args += ["-std=c99", "-Wall", "-Wextra"]
if DEBUG:
# Enable debug symbols, assertions, and disable optimizations
extra_compile_args += ["-g3", "-O0", "-UNDEBUG", "-Werror"]
extensions = [
Extension(
"fst_lookup._fst_lookup",
sources=["fst_lookup/_fst_lookup.c"],
extra_compile_args=extra_compile_args,
)
]
def build(setup_kwargs):
"""
Setup to build a C extension.
"""
if SHOULD_BUILD_EXTENSION:
setup_kwargs.update({"ext_modules": extensions})
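

if __name__ == "__main__":
    # Illustrative sketch (not part of the original build tooling): show what
    # build() would hand to setuptools.setup(). The empty dict stands in for
    # the keyword arguments Poetry or a setup.py shim would normally pass.
    demo_kwargs = {}
    build(demo_kwargs)
    print(demo_kwargs)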
|
[] |
[] |
[
"FST_LOOKUP_DEBUG",
"FST_LOOKUP_BUILD_EXT"
] |
[]
|
["FST_LOOKUP_DEBUG", "FST_LOOKUP_BUILD_EXT"]
|
python
| 2 | 0 | |
pkg/config/config.go
|
/*
Copyright (c) 2018 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains the types and functions used to manage the configuration of the command line
// client.
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/golang/glog"
"github.com/mitchellh/go-homedir"
sdk "github.com/openshift-online/ocm-sdk-go"
"github.com/openshift-online/ocm-cli/pkg/debug"
)
// Config is the type used to store the configuration of the client.
// There's no way to line-split or predefine tags, so...
//nolint:lll
type Config struct {
// TODO(efried): Better docs for things like AccessToken
// TODO(efried): Dedup with flag docs in cmd/ocm/login/cmd.go:init where possible
AccessToken string `json:"access_token,omitempty" doc:"Bearer access token."`
ClientID string `json:"client_id,omitempty" doc:"OpenID client identifier."`
ClientSecret string `json:"client_secret,omitempty" doc:"OpenID client secret."`
Insecure bool `json:"insecure,omitempty" doc:"Enables insecure communication with the server. This disables verification of TLS certificates and host names."`
Password string `json:"password,omitempty" doc:"User password."`
RefreshToken string `json:"refresh_token,omitempty" doc:"Offline or refresh token."`
Scopes []string `json:"scopes,omitempty" doc:"OpenID scope. If this option is used it will replace completely the default scopes. Can be repeated multiple times to specify multiple scopes."`
TokenURL string `json:"token_url,omitempty" doc:"OpenID token URL."`
URL string `json:"url,omitempty" doc:"URL of the API gateway. The value can be the complete URL or an alias. The valid aliases are 'production', 'staging' and 'integration'."`
User string `json:"user,omitempty" doc:"User name."`
}
// Load loads the configuration from the configuration file. If the configuration file doesn't exist
// it will return an empty configuration object.
func Load() (cfg *Config, err error) {
file, err := Location()
if err != nil {
return
}
_, err = os.Stat(file)
if os.IsNotExist(err) {
cfg = nil
err = nil
return
}
if err != nil {
err = fmt.Errorf("can't check if config file '%s' exists: %v", file, err)
return
}
// #nosec G304
data, err := ioutil.ReadFile(file)
if err != nil {
err = fmt.Errorf("can't read config file '%s': %v", file, err)
return
}
cfg = new(Config)
err = json.Unmarshal(data, cfg)
if err != nil {
err = fmt.Errorf("can't parse config file '%s': %v", file, err)
return
}
return
}
// Save saves the given configuration to the configuration file.
func Save(cfg *Config) error {
file, err := Location()
if err != nil {
return err
}
data, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return fmt.Errorf("can't marshal config: %v", err)
}
err = ioutil.WriteFile(file, data, 0600)
if err != nil {
return fmt.Errorf("can't write file '%s': %v", file, err)
}
return nil
}
// Remove removes the configuration file.
func Remove() error {
file, err := Location()
if err != nil {
return err
}
_, err = os.Stat(file)
if os.IsNotExist(err) {
return nil
}
err = os.Remove(file)
if err != nil {
return err
}
return nil
}
// Location returns the location of the configuration file.
func Location() (path string, err error) {
if ocmconfig := os.Getenv("OCM_CONFIG"); ocmconfig != "" {
path = ocmconfig
} else {
home, err := homedir.Dir()
if err != nil {
return "", err
}
path = filepath.Join(home, ".ocm.json")
}
return path, nil
}
// Armed checks if the configuration contains either credentials or tokens that haven't expired, so
// that it can be used to perform authenticated requests.
func (c *Config) Armed() (armed bool, err error) {
if c.User != "" && c.Password != "" {
armed = true
return
}
if c.ClientID != "" && c.ClientSecret != "" {
armed = true
return
}
now := time.Now()
if c.AccessToken != "" {
var expires bool
var left time.Duration
expires, left, err = tokenExpiry(c.AccessToken, now)
if err != nil {
return
}
if !expires || left > 5*time.Second {
armed = true
return
}
}
if c.RefreshToken != "" {
var expires bool
var left time.Duration
expires, left, err = tokenExpiry(c.RefreshToken, now)
if err != nil {
return
}
if !expires || left > 10*time.Second {
armed = true
return
}
}
return
}
// Connection creates a connection using this configuration.
func (c *Config) Connection() (connection *sdk.Connection, err error) {
// Create the logger:
level := glog.Level(1)
if debug.Enabled() {
level = glog.Level(0)
}
logger, err := sdk.NewGlogLoggerBuilder().
DebugV(level).
InfoV(level).
WarnV(level).
Build()
if err != nil {
return
}
// Prepare the builder for the connection adding only the properties that have explicit
// values in the configuration, so that default values won't be overridden:
builder := sdk.NewConnectionBuilder()
builder.Logger(logger)
if c.TokenURL != "" {
builder.TokenURL(c.TokenURL)
}
if c.ClientID != "" || c.ClientSecret != "" {
builder.Client(c.ClientID, c.ClientSecret)
}
if c.Scopes != nil {
builder.Scopes(c.Scopes...)
}
if c.URL != "" {
builder.URL(c.URL)
}
if c.User != "" || c.Password != "" {
builder.User(c.User, c.Password)
}
tokens := make([]string, 0, 2)
if c.AccessToken != "" {
tokens = append(tokens, c.AccessToken)
}
if c.RefreshToken != "" {
tokens = append(tokens, c.RefreshToken)
}
if len(tokens) > 0 {
builder.Tokens(tokens...)
}
builder.Insecure(c.Insecure)
// Create the connection:
connection, err = builder.Build()
if err != nil {
return
}
return
}
func tokenExpiry(text string, now time.Time) (expires bool, left time.Duration, err error) {
parser := new(jwt.Parser)
token, _, err := parser.ParseUnverified(text, jwt.MapClaims{})
if err != nil {
err = fmt.Errorf("cant parse token: %v", err)
return
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
err = fmt.Errorf("expected map claims bug got %T", claims)
return
}
claim, ok := claims["exp"]
if !ok {
err = fmt.Errorf("token doesn't contain the 'exp' claim")
return
}
exp, ok := claim.(float64)
if !ok {
err = fmt.Errorf("expected floating point 'exp' but got %T", claim)
return
}
if exp == 0 {
expires = false
left = 0
} else {
expires = true
left = time.Unix(int64(exp), 0).Sub(now)
}
return
}
|
[
"\"OCM_CONFIG\""
] |
[] |
[
"OCM_CONFIG"
] |
[]
|
["OCM_CONFIG"]
|
go
| 1 | 0 | |
homeassistant/__main__.py
|
"""Start Home Assistant."""
from __future__ import print_function
import argparse
import os
import platform
import subprocess
import sys
import threading
from typing import List, Dict, Any, TYPE_CHECKING
from homeassistant import monkey_patch
from homeassistant.const import __version__, REQUIRED_PYTHON_VER, RESTART_EXIT_CODE
if TYPE_CHECKING:
from homeassistant import core
def set_loop() -> None:
"""Attempt to use uvloop."""
import asyncio
from asyncio.events import BaseDefaultEventLoopPolicy
policy = None
if sys.platform == "win32":
if hasattr(asyncio, "WindowsProactorEventLoopPolicy"):
# pylint: disable=no-member
policy = asyncio.WindowsProactorEventLoopPolicy()
else:
class ProactorPolicy(BaseDefaultEventLoopPolicy):
"""Event loop policy to create proactor loops."""
_loop_factory = asyncio.ProactorEventLoop
policy = ProactorPolicy()
else:
try:
import uvloop
except ImportError:
pass
else:
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def validate_python() -> None:
"""Validate that the right Python version is running."""
if sys.version_info[:3] < REQUIRED_PYTHON_VER:
print(
"Home Assistant requires at least Python {}.{}.{}".format(
*REQUIRED_PYTHON_VER
)
)
sys.exit(1)
def ensure_config_path(config_dir: str) -> None:
"""Validate the configuration directory."""
import homeassistant.config as config_util
lib_dir = os.path.join(config_dir, "deps")
# Test if configuration directory exists
if not os.path.isdir(config_dir):
if config_dir != config_util.get_default_config_dir():
print(
(
"Fatal Error: Specified configuration directory does "
"not exist {} "
).format(config_dir)
)
sys.exit(1)
try:
os.mkdir(config_dir)
except OSError:
print(
(
"Fatal Error: Unable to create default configuration "
"directory {} "
).format(config_dir)
)
sys.exit(1)
# Test if library directory exists
if not os.path.isdir(lib_dir):
try:
os.mkdir(lib_dir)
except OSError:
print(
("Fatal Error: Unable to create library " "directory {} ").format(
lib_dir
)
)
sys.exit(1)
async def ensure_config_file(hass: "core.HomeAssistant", config_dir: str) -> str:
"""Ensure configuration file exists."""
import homeassistant.config as config_util
config_path = await config_util.async_ensure_config_exists(hass, config_dir)
if config_path is None:
print("Error getting configuration path")
sys.exit(1)
return config_path
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
import homeassistant.config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate."
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"--demo-mode", action="store_true", help="Start Home Assistant in demo mode"
)
parser.add_argument(
"--debug", action="store_true", help="Start Home Assistant in debug mode"
)
parser.add_argument(
"--open-ui", action="store_true", help="Open the webinterface in a browser"
)
parser.add_argument(
"--skip-pip",
action="store_true",
help="Skips pip install of required packages on startup",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging to file."
)
parser.add_argument(
"--pid-file",
metavar="path_to_pid_file",
default=None,
help="Path to PID file useful for running as daemon",
)
parser.add_argument(
"--log-rotate-days",
type=int,
default=None,
help="Enables daily log rotation and keeps up to the specified days",
)
parser.add_argument(
"--log-file",
type=str,
default=None,
help="Log file to write to. If not set, CONFIG/home-assistant.log " "is used",
)
parser.add_argument(
"--log-no-color", action="store_true", help="Disable color logs"
)
parser.add_argument(
"--runner",
action="store_true",
help=f"On restart exit with code {RESTART_EXIT_CODE}",
)
parser.add_argument(
"--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
)
if os.name == "posix":
parser.add_argument(
"--daemon", action="store_true", help="Run Home Assistant as daemon"
)
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, "daemon", False)
return arguments
def daemonize() -> None:
"""Move current process to daemon process."""
# Create first fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# Decouple fork
os.setsid()
# Create second fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# redirect standard file descriptors to devnull
infd = open(os.devnull, "r")
outfd = open(os.devnull, "a+")
sys.stdout.flush()
sys.stderr.flush()
os.dup2(infd.fileno(), sys.stdin.fileno())
os.dup2(outfd.fileno(), sys.stdout.fileno())
os.dup2(outfd.fileno(), sys.stderr.fileno())
def check_pid(pid_file: str) -> None:
"""Check that Home Assistant is not already running."""
# Check pid file
try:
with open(pid_file, "r") as file:
pid = int(file.readline())
except OSError:
# PID File does not exist
return
# If we just restarted, we just found our own pidfile.
if pid == os.getpid():
return
try:
os.kill(pid, 0)
except OSError:
# PID does not exist
return
print("Fatal Error: HomeAssistant is already running.")
sys.exit(1)
def write_pid(pid_file: str) -> None:
"""Create a PID File."""
pid = os.getpid()
try:
with open(pid_file, "w") as file:
file.write(str(pid))
except OSError:
print(f"Fatal Error: Unable to write pid file {pid_file}")
sys.exit(1)
def closefds_osx(min_fd: int, max_fd: int) -> None:
"""Make sure file descriptors get closed when we restart.
We cannot call close on guarded fds, and we cannot easily test which fds
are guarded. But we can set the close-on-exec flag on everything we want to
get rid of.
"""
from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC
for _fd in range(min_fd, max_fd):
try:
val = fcntl(_fd, F_GETFD)
if not val & FD_CLOEXEC:
fcntl(_fd, F_SETFD, val | FD_CLOEXEC)
except OSError:
pass
def cmdline() -> List[str]:
"""Collect path and arguments to re-execute the current hass instance."""
if os.path.basename(sys.argv[0]) == "__main__.py":
modulepath = os.path.dirname(sys.argv[0])
os.environ["PYTHONPATH"] = os.path.dirname(modulepath)
return [sys.executable] + [arg for arg in sys.argv if arg != "--daemon"]
return [arg for arg in sys.argv if arg != "--daemon"]
async def setup_and_run_hass(config_dir: str, args: argparse.Namespace) -> int:
"""Set up HASS and run."""
# pylint: disable=redefined-outer-name
from homeassistant import bootstrap, core
hass = core.HomeAssistant()
if args.demo_mode:
config: Dict[str, Any] = {"frontend": {}, "demo": {}}
        await bootstrap.async_from_config_dict(
config,
hass,
config_dir=config_dir,
verbose=args.verbose,
skip_pip=args.skip_pip,
log_rotate_days=args.log_rotate_days,
log_file=args.log_file,
log_no_color=args.log_no_color,
)
else:
config_file = await ensure_config_file(hass, config_dir)
print("Config directory:", config_dir)
await bootstrap.async_from_config_file(
config_file,
hass,
verbose=args.verbose,
skip_pip=args.skip_pip,
log_rotate_days=args.log_rotate_days,
log_file=args.log_file,
log_no_color=args.log_no_color,
)
if args.open_ui and hass.config.api is not None:
import webbrowser
hass.add_job(webbrowser.open, hass.config.api.base_url)
return await hass.async_run()
def try_to_restart() -> None:
"""Attempt to clean up state and start a new Home Assistant instance."""
# Things should be mostly shut down already at this point, now just try
# to clean up things that may have been left behind.
sys.stderr.write("Home Assistant attempting to restart.\n")
# Count remaining threads, ideally there should only be one non-daemonized
# thread left (which is us). Nothing we really do with it, but it might be
# useful when debugging shutdown/restart issues.
try:
nthreads = sum(
thread.is_alive() and not thread.daemon for thread in threading.enumerate()
)
if nthreads > 1:
sys.stderr.write(f"Found {nthreads} non-daemonic threads.\n")
# Somehow we sometimes seem to trigger an assertion in the python threading
# module. It seems we find threads that have no associated OS level thread
# which are not marked as stopped at the python level.
except AssertionError:
sys.stderr.write("Failed to count non-daemonic threads.\n")
# Try to not leave behind open filedescriptors with the emphasis on try.
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError:
max_fd = 256
if platform.system() == "Darwin":
closefds_osx(3, max_fd)
else:
os.closerange(3, max_fd)
# Now launch into a new instance of Home Assistant. If this fails we
# fall through and exit with error 100 (RESTART_EXIT_CODE) in which case
# systemd will restart us when RestartForceExitStatus=100 is set in the
# systemd.service file.
sys.stderr.write("Restarting Home Assistant\n")
args = cmdline()
os.execv(args[0], args)
def main() -> int:
"""Start Home Assistant."""
validate_python()
monkey_patch_needed = sys.version_info[:3] < (3, 6, 3)
if monkey_patch_needed and os.environ.get("HASS_NO_MONKEY") != "1":
monkey_patch.disable_c_asyncio()
monkey_patch.patch_weakref_tasks()
set_loop()
# Run a simple daemon runner process on Windows to handle restarts
if os.name == "nt" and "--runner" not in sys.argv:
nt_args = cmdline() + ["--runner"]
while True:
try:
subprocess.check_call(nt_args)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except subprocess.CalledProcessError as exc:
if exc.returncode != RESTART_EXIT_CODE:
sys.exit(exc.returncode)
args = get_arguments()
if args.script is not None:
from homeassistant import scripts
return scripts.run(args.script)
config_dir = os.path.join(os.getcwd(), args.config)
ensure_config_path(config_dir)
# Daemon functions
if args.pid_file:
check_pid(args.pid_file)
if args.daemon:
daemonize()
if args.pid_file:
write_pid(args.pid_file)
from homeassistant.util.async_ import asyncio_run
exit_code = asyncio_run(setup_and_run_hass(config_dir, args))
if exit_code == RESTART_EXIT_CODE and not args.runner:
try_to_restart()
return exit_code # type: ignore
if __name__ == "__main__":
sys.exit(main())
|
[] |
[] |
[
"PYTHONPATH",
"HASS_NO_MONKEY"
] |
[]
|
["PYTHONPATH", "HASS_NO_MONKEY"]
|
python
| 2 | 0 | |
pkg/asset/installconfig/openstack/validate.go
|
package openstack
import (
"os"
"k8s.io/apimachinery/pkg/util/validation/field"
"github.com/openshift/installer/pkg/asset/installconfig/openstack/validation"
"github.com/openshift/installer/pkg/types"
"github.com/sirupsen/logrus"
)
// Validate validates the given installconfig for OpenStack platform
func Validate(ic *types.InstallConfig) error {
if skip := os.Getenv("OPENSHFIT_INSTALL_SKIP_PREFLIGHT_VALIDATIONS"); skip == "1" {
logrus.Warnf("OVERRIDE: pre-flight validation disabled.")
return nil
}
ci, err := validation.GetCloudInfo(ic)
if err != nil {
return err
}
allErrs := field.ErrorList{}
allErrs = append(allErrs, validation.ValidatePlatform(ic.Platform.OpenStack, ic.Networking, ci)...)
if ic.ControlPlane.Platform.OpenStack != nil {
allErrs = append(allErrs, validation.ValidateMachinePool(ic.ControlPlane.Platform.OpenStack, ci, true, field.NewPath("controlPlane", "platform", "openstack"))...)
}
for idx, compute := range ic.Compute {
fldPath := field.NewPath("compute").Index(idx)
if compute.Platform.OpenStack != nil {
allErrs = append(
allErrs,
validation.ValidateMachinePool(compute.Platform.OpenStack, ci, false, fldPath.Child("platform", "openstack"))...)
}
}
return allErrs.ToAggregate()
}
|
[
"\"OPENSHFIT_INSTALL_SKIP_PREFLIGHT_VALIDATIONS\""
] |
[] |
[
"OPENSHFIT_INSTALL_SKIP_PREFLIGHT_VALIDATIONS"
] |
[]
|
["OPENSHFIT_INSTALL_SKIP_PREFLIGHT_VALIDATIONS"]
|
go
| 1 | 0 | |
src/test/java/net/sourceforge/myvd/test/ldap/TestAddAttribute.java
|
/*
* Copyright 2008 Marc Boorshtein
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sourceforge.myvd.test.ldap;
import com.novell.ldap.LDAPAttribute;
import com.novell.ldap.LDAPAttributeSet;
import com.novell.ldap.LDAPConnection;
import com.novell.ldap.LDAPEntry;
import com.novell.ldap.LDAPException;
import com.novell.ldap.LDAPSearchResults;
import net.sourceforge.myvd.test.util.OpenLDAPUtils;
import net.sourceforge.myvd.test.util.StartMyVD;
import net.sourceforge.myvd.test.util.StartOpenLDAP;
import net.sourceforge.myvd.test.util.Util;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import static org.junit.Assert.*;
public class TestAddAttribute {
private static StartOpenLDAP baseServer;
private static StartOpenLDAP internalServer;
private static StartOpenLDAP externalServer;
private static StartMyVD server;
@BeforeClass
public static void setUp() throws Exception {
OpenLDAPUtils.killAllOpenLDAPS();
baseServer = new StartOpenLDAP();
baseServer.startServer(System.getenv("PROJ_DIR") + "/test/Base", 10983, "cn=admin,dc=domain,dc=com", "manager");
internalServer = new StartOpenLDAP();
internalServer.startServer(System.getenv("PROJ_DIR") + "/test/InternalUsersCustom", 11983,
"cn=admin,ou=internal,dc=domain,dc=com", "manager");
externalServer = new StartOpenLDAP();
externalServer.startServer(System.getenv("PROJ_DIR") + "/test/ExternalUsers", 12983,
"cn=admin,ou=external,dc=domain,dc=com", "manager");
server = new StartMyVD();
server.startServer(System.getenv("PROJ_DIR") + "/test/TestServer/addattribute.props", 50983);
}
@AfterClass
public static void tearDown() throws Exception {
baseServer.stopServer();
internalServer.stopServer();
externalServer.stopServer();
server.stopServer();
}
@Test
public void testUserSearchNoAttrs() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("objectClass", "customPerson"));
// attribs.getAttribute("objectClass").addValue("customPerson");
attribs.add(new LDAPAttribute("cn", "Test User"));
attribs.add(new LDAPAttribute("sn", "User"));
// attribs.add(new LDAPAttribute("testAttrib", "testVal"));
attribs.add(new LDAPAttribute("uid", "testUser"));
attribs.add(new LDAPAttribute("userPassword", "secret"));
attribs.add(new LDAPAttribute("o", "myorg"));
attribs.add(new LDAPAttribute("sumNum", "5"));
// attribs.add(new LDAPAttribute("globalTestAttrib","globalTestVal"));
LDAPEntry entry2 = new LDAPEntry("cn=Test User,ou=internal,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("localhost", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("o=mycompany,c=us", 2, "(cn=Test User)", new String[0], false);
/*
* if (results.size() != 3) { fail("incorrect number of result sets : "
* + results.size()); return; }
*/
int size = 0;
while (res.hasMore()) {
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry2;
} else if (size == 1) {
controlEntry = null;
} else {
controlEntry = null;
}
if (controlEntry == null) {
fail("Entry " + fromDir.getDN() + " should not be returned");
return;
}
if (!Util.compareEntry(fromDir, controlEntry)) {
fail("The entry was not correct : " + fromDir.toString());
return;
}
size++;
}
if (size != 1) {
fail("Not the correct number of entries : " + size);
}
con.disconnect();
}
@Test
public void testUserSearchWithAttrs() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("uid", "testUser"));
attribs.add(new LDAPAttribute("o", "myorg"));
LDAPEntry entry2 = new LDAPEntry("cn=Test User,ou=internal,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("localhost", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("o=mycompany,c=us", 2, "(cn=Test User)", new String[] { "uid", "o" }, false);
/*
* if (results.size() != 3) { fail("incorrect number of result sets : "
* + results.size()); return; }
*/
int size = 0;
while (res.hasMore()) {
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry2;
} else if (size == 1) {
controlEntry = null;
} else {
controlEntry = null;
}
if (controlEntry == null) {
fail("Entry " + fromDir.getDN() + " should not be returned");
return;
}
if (!Util.compareEntry(fromDir, controlEntry)) {
fail("The entry was not correct : " + fromDir.toString());
return;
}
size++;
}
if (size != 1) {
fail("Not the correct number of entries : " + size);
}
con.disconnect();
}
@Test
public void testNoAttr() throws LDAPException {
LDAPAttributeSet attribs = new LDAPAttributeSet();
attribs.add(new LDAPAttribute("ou", "internal"));
attribs.add(new LDAPAttribute("objectClass", "organizationalUnit"));
LDAPEntry entry2 = new LDAPEntry("ou=internal,o=mycompany,c=us", attribs);
LDAPConnection con = new LDAPConnection();
con.connect("localhost", 50983);
// con.bind(3,"cn=admin,o=mycompany","manager".getBytes());
LDAPSearchResults res = con.search("ou=internal,o=mycompany,c=us", 0, "(objectClass=*)", new String[0], false);
/*
* if (results.size() != 3) { fail("incorrect number of result sets : "
* + results.size()); return; }
*/
int size = 0;
while (res.hasMore()) {
LDAPEntry fromDir = res.next();
LDAPEntry controlEntry = null;// control.get(fromDir.getEntry().getDN());
if (size == 0) {
controlEntry = entry2;
} else if (size == 1) {
controlEntry = null;
} else {
controlEntry = null;
}
if (controlEntry == null) {
fail("Entry " + fromDir.getDN() + " should not be returned");
return;
}
if (!Util.compareEntry(fromDir, controlEntry)) {
fail("The entry was not correct : " + fromDir.toString());
return;
}
size++;
}
if (size != 1) {
fail("Not the correct number of entries : " + size);
}
con.disconnect();
}
}
|
[
"\"PROJ_DIR\"",
"\"PROJ_DIR\"",
"\"PROJ_DIR\"",
"\"PROJ_DIR\""
] |
[] |
[
"PROJ_DIR"
] |
[]
|
["PROJ_DIR"]
|
java
| 1 | 0 | |
web/sca/clients/clients.go
|
/*
Copyright <holder> All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
History:
Date Who ID Description
-------- --- --- -----------
01/13/19 nanjj Initial code
*/
package clients
import (
"crypto/tls"
"fmt"
"os"
"sync"
"github.com/IBM/cloudland/web/sca/interceptors/xrequestid"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
)
const (
API = "api"
)
var (
clientconns = &sync.Map{}
locker = &sync.Mutex{}
)
func GetClientConn(name string) (conn *grpc.ClientConn) {
return getClientConn(name)
}
func getClientConn(name string) (conn *grpc.ClientConn) {
conn = clientConn(getEndpoint(name))
return
}
func clientConn(endpoint string) (conn *grpc.ClientConn) {
conn = load(endpoint)
if conn == nil {
conn = dial(endpoint)
}
return
}
func load(endpoint string) (conn *grpc.ClientConn) {
if v, ok := clientconns.Load(endpoint); ok {
conn = v.(*grpc.ClientConn)
state := conn.GetState()
if state <= connectivity.Ready {
return
} else {
conn.Close()
conn = nil
}
}
return
}
func dial(endpoint string) (conn *grpc.ClientConn) {
locker.Lock()
defer locker.Unlock()
if conn = load(endpoint); conn != nil {
return
}
var err error
var opts []grpc.DialOption
if os.Getenv("TLS_ENABLE") == "true" {
creds := credentials.NewTLS(&tls.Config{
InsecureSkipVerify: true,
})
opts = []grpc.DialOption{grpc.WithTransportCredentials(creds)}
} else {
opts = []grpc.DialOption{grpc.WithInsecure()}
}
unarys := []grpc.UnaryClientInterceptor{}
streams := []grpc.StreamClientInterceptor{}
if tracer := opentracing.GlobalTracer(); tracer != nil {
unarys = append(unarys, otgrpc.OpenTracingClientInterceptor(tracer))
streams = append(streams, otgrpc.OpenTracingStreamClientInterceptor(tracer))
}
unarys = append(unarys, xrequestid.UnaryClientInterceptor)
streams = append(streams, xrequestid.StreamClientInterceptor)
opts = append(opts,
grpc.WithUnaryInterceptor(middleware.ChainUnaryClient(unarys...)),
grpc.WithStreamInterceptor(middleware.ChainStreamClient(streams...)))
if conn, err = grpc.Dial(endpoint, opts...); err != nil {
logrus.Fatal(err)
return
}
clientconns.Store(endpoint, conn)
return
}
func getProxy(name string, fallbacks ...string) (proxy string) {
return getConfigString(name, "proxy", fallbacks...)
}
func getEndpoint(name string, fallbacks ...string) (endpoint string) {
endpoint = getConfigString(name, "endpoint", fallbacks...)
return
}
func getDebug(name string) (debugging bool) {
return viper.GetBool(fmt.Sprintf("%s.%s", name, "debug"))
}
func getConfigString(name, config string, fallbacks ...string) (value string) {
value = viper.GetString(fmt.Sprintf("%s.%s", name, config))
if value == "" { // fall back to api value
if len(fallbacks) == 0 {
fallbacks = append(fallbacks, API)
}
for _, name = range fallbacks {
if name == "" {
break
}
value = viper.GetString(fmt.Sprintf("%s.%s", name, config))
if value != "" {
break
}
}
}
return
}
|
[
"\"TLS_ENABLE\""
] |
[] |
[
"TLS_ENABLE"
] |
[]
|
["TLS_ENABLE"]
|
go
| 1 | 0 | |
bluelog/settings.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:Modify: oriyao
:url: http://greyli.com
:copyright: © 2018 Grey Li <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import os
import sys
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# SQLite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
class BaseConfig(object):
SECRET_KEY = os.getenv('SECRET_KEY', 'dev key')
    # Serve Bootstrap resources from local files
BOOTSTRAP_SERVE_LOCAL = True
    # Flask-DebugToolbar debugging
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_ENABLED = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_RECORD_QUERIES = True
CKEDITOR_ENABLE_CSRF = True
CKEDITOR_FILE_UPLOADER = 'admin.upload_image'
MAIL_SERVER = os.getenv('MAIL_SERVER')
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = os.getenv('MAIL_USERNAME')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = ('Bluelog Admin', MAIL_USERNAME)
BLUELOG_EMAIL = os.getenv('BLUELOG_EMAIL')
BLUELOG_POST_PER_PAGE = 10
BLUELOG_MANAGE_POST_PER_PAGE = 15
BLUELOG_COMMENT_PER_PAGE = 15
# ('theme name', 'display name')
BLUELOG_THEMES = {'perfect_blue': 'Perfect Blue', 'black_swan': 'Black Swan'}
BLUELOG_SLOW_QUERY_THRESHOLD = 1
BLUELOG_UPLOAD_PATH = os.path.join(basedir, 'uploads')
BLUELOG_ALLOWED_IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']
class DevelopmentConfig(BaseConfig):
SQLALCHEMY_DATABASE_URI = prefix + os.path.join(basedir, 'data-dev.db')
class TestingConfig(BaseConfig):
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' # in-memory database
class ProductionConfig(BaseConfig):
LOGPATH = os.path.join(basedir, 'logs/oriyao2020.log')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, 'data.db'))
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig
}
|
[] |
[] |
[
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"BLUELOG_EMAIL",
"SECRET_KEY",
"MAIL_USERNAME"
] |
[]
|
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "BLUELOG_EMAIL", "SECRET_KEY", "MAIL_USERNAME"]
|
python
| 6 | 0 | |
server/app/settings.py
|
#
# Copyright (c) 2017 - Bambora Inc. <http://dev.na.bambora.com>
# MIT licensed. Feel free to use and abuse.
#
import os
import base64
import boto3
import decimal
# NOTE: A merchant account on our sandbox env (sandbox-web.na.bambora.com) is only
# required when testing Visa Checkout. All other payment types can be tested on
# web.na.bambora.com / api.na.bambora.com.
# Bambora Payment APIs Server base URL. Defaults to 'https://api.na.bambora.com'
base_url = os.environ.get('SERVER_URL_BASE', 'https://api.na.bambora.com')
base_querystring_sandbox_url = os.environ.get('QUERYSTRING_SERVER_URL_BASE', 'https://sandbox-web.na.bambora.com')
# Bambora params needed for authentication include Merchant ID & API Passcode.
# --> More info here: https://dev.na.bambora.com/docs/guides/merchant_quickstart/
try:
# Open a client connection to AWS SSM and get secrets
ssmclient = boto3.client('ssm')
merchant_id = ssmclient.get_parameter(Name="paymentapidemo-MERCHANT_ID", WithDecryption=True)['Parameter']['Value']
api_passcode = ssmclient.get_parameter(Name="paymentapidemo-API_PASSCODE", WithDecryption=True)['Parameter']['Value']
report_api_passcode = ssmclient.get_parameter(Name="paymentapidemo-REPORT_API_PASSCODE", WithDecryption=True)['Parameter']['Value']
sandbox_merchant_id = ssmclient.get_parameter(Name="paymentapidemo-SANDBOX_MERCHANT_ID", WithDecryption=True)['Parameter']['Value']
sandbox_api_passcode = ssmclient.get_parameter(Name="paymentapidemo-SANDBOX_API_PASSCODE", WithDecryption=True)['Parameter'][
'Value']
sandbox_hash_key = ssmclient.get_parameter(Name="paymentapidemo-SANDBOX_HASH_KEY", WithDecryption=True)['Parameter']['Value']
sandbox_visa_checkout_api_key = \
ssmclient.get_parameter(Name="paymentapidemo-SANDBOX_VISA_CHECKOUT_API_KEY", WithDecryption=True)['Parameter']['Value']
secret_key = ssmclient.get_parameter(Name="paymentapidemo-SECRET_KEY", WithDecryption=True)['Parameter']['Value']
except Exception as e:
merchant_id = os.environ.get('MERCHANT_ID')
api_passcode = os.environ.get('API_PASSCODE')
report_api_passcode = os.environ.get('REPORT_API_PASSCODE')
sandbox_merchant_id = os.environ.get('SANDBOX_MERCHANT_ID')
sandbox_api_passcode = os.environ.get('SANDBOX_API_PASSCODE')
sandbox_hash_key = os.environ.get('SANDBOX_HASH_KEY')
sandbox_visa_checkout_api_key = os.environ.get('SANDBOX_VISA_CHECKOUT_API_KEY')
secret_key = os.environ.get('SECRET_KEY')
if (merchant_id is None or
api_passcode is None or
            sandbox_merchant_id is None or
            sandbox_api_passcode is None or
sandbox_hash_key is None or
sandbox_visa_checkout_api_key is None):
print('Setup incomplete. Please set all environment variables and then'
' start this app!')
exit(0)
print('-> API Server: ' + base_url)
print('-> Merchant ID: ' + merchant_id)
# http://stackoverflow.com/questions/1995615/how-can-i-format-a-decimal-to-always-show-2-decimal-places
TWO_PLACES = decimal.Decimal(10) ** -2 # same as Decimal('0.01')
def create_auth_headers() -> dict:
passcode = merchant_id + ':' + api_passcode
passcode = base64.b64encode(passcode.encode('utf-8')).decode()
headers = {
'Authorization': 'Passcode {}'.format(passcode),
'Content-Type': 'application/json'
}
return headers
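

if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): print the
    # Passcode Authorization header built from the merchant ID and API
    # passcode loaded above, so the format can be checked without calling
    # the Payment APIs.
    print(create_auth_headers())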
|
[] |
[] |
[
"SANDBOX_VISA_CHECKOUT_API_KEY",
"SANDBOX_HASH_KEY",
"SANDBOX_MERCHANT_ID",
"SERVER_URL_BASE",
"REPORT_API_PASSCODE",
"SECRET_KEY",
"API_PASSCODE",
"MERCHANT_ID",
"SANDBOX_API_PASSCODE",
"QUERYSTRING_SERVER_URL_BASE"
] |
[]
|
["SANDBOX_VISA_CHECKOUT_API_KEY", "SANDBOX_HASH_KEY", "SANDBOX_MERCHANT_ID", "SERVER_URL_BASE", "REPORT_API_PASSCODE", "SECRET_KEY", "API_PASSCODE", "MERCHANT_ID", "SANDBOX_API_PASSCODE", "QUERYSTRING_SERVER_URL_BASE"]
|
python
| 10 | 0 | |
vendor/gocloud.dev/blob/gcsblob/gcsblob.go
|
// Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gcsblob provides a blob implementation that uses GCS. Use OpenBucket
// to construct a *blob.Bucket.
//
// URLs
//
// For blob.OpenBucket, gcsblob registers for the scheme "gs".
// The default URL opener will set up a connection using default credentials
// from the environment, as described in
// https://cloud.google.com/docs/authentication/production.
// Some environments, such as GCE, come without a private key. In such cases
// the IAM Credentials API will be configured for use in Options.MakeSignBytes,
// which will introduce latency to any and all calls to bucket.SignedURL
// that you can avoid by installing a service account credentials file or
// obtaining and configuring a private key:
// https://cloud.google.com/iam/docs/creating-managing-service-account-keys
//
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for gcsblob:
// - Blob keys: ASCII characters 10 and 13 are escaped to "__0x<hex>__".
// Additionally, the "/" in "../" is escaped in the same way.
//
// As
//
// gcsblob exposes the following types for As:
// - Bucket: *storage.Client
// - Error: *googleapi.Error
// - ListObject: storage.ObjectAttrs
// - ListOptions.BeforeList: *storage.Query
// - Reader: *storage.Reader
// - ReaderOptions.BeforeRead: **storage.ObjectHandle, *storage.Reader (if accessing both, must be in that order)
// - Attributes: storage.ObjectAttrs
// - CopyOptions.BeforeCopy: *CopyObjectHandles, *storage.Copier (if accessing both, must be in that order)
// - WriterOptions.BeforeWrite: **storage.ObjectHandle, *storage.Writer (if accessing both, must be in that order)
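//
// Example
//
// A minimal usage sketch of opening a bucket through the registered "gs"
// URL scheme; the bucket name "my-bucket" is a placeholder and error
// handling is elided:
//
//     ctx := context.Background()
//     bucket, err := blob.OpenBucket(ctx, "gs://my-bucket")
//     if err != nil {
//         return err
//     }
//     defer bucket.Close()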
package gcsblob // import "gocloud.dev/blob/gcsblob"
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strings"
"sync"
"time"
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/storage"
"github.com/google/wire"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
"gocloud.dev/gcp"
"gocloud.dev/internal/escape"
"gocloud.dev/internal/useragent"
)
const defaultPageSize = 1000
func init() {
blob.DefaultURLMux().RegisterBucket(Scheme, new(lazyCredsOpener))
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
wire.Struct(new(URLOpener), "Client"),
)
// readDefaultCredentials gets the field values from the supplied JSON data.
// For its possible formats please see
// https://cloud.google.com/iam/docs/creating-managing-service-account-keys#iam-service-account-keys-create-go
//
// Use "golang.org/x/oauth2/google".DefaultCredentials.JSON to get
// the contents of the preferred credential file.
//
// Returns null-values for fields that have not been obtained.
func readDefaultCredentials(credFileAsJSON []byte) (AccessID string, PrivateKey []byte) {
// For example, a credentials file as generated for service accounts through the web console.
var contentVariantA struct {
ClientEmail string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(credFileAsJSON, &contentVariantA); err == nil {
AccessID = contentVariantA.ClientEmail
PrivateKey = []byte(contentVariantA.PrivateKey)
}
if AccessID != "" {
return
}
// If obtained through the REST API.
var contentVariantB struct {
Name string `json:"name"`
PrivateKeyData string `json:"privateKeyData"`
}
if err := json.Unmarshal(credFileAsJSON, &contentVariantB); err == nil {
nextFieldIsAccessID := false
for _, s := range strings.Split(contentVariantB.Name, "/") {
if nextFieldIsAccessID {
AccessID = s
break
}
nextFieldIsAccessID = s == "serviceAccounts"
}
PrivateKey = []byte(contentVariantB.PrivateKeyData)
}
return
}
// lazyCredsOpener obtains Application Default Credentials on the first call
// to OpenBucketURL.
type lazyCredsOpener struct {
init sync.Once
opener *URLOpener
err error
}
func (o *lazyCredsOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
o.init.Do(func() {
var opts Options
var creds *google.Credentials
if os.Getenv("STORAGE_EMULATOR_HOST") != "" {
creds, _ = google.CredentialsFromJSON(ctx, []byte(`{"type": "service_account", "project_id": "my-project-id"}`))
} else {
var err error
creds, err = gcp.DefaultCredentials(ctx)
if err != nil {
o.err = err
return
}
// Populate default values from credentials files, where available.
opts.GoogleAccessID, opts.PrivateKey = readDefaultCredentials(creds.JSON)
// … else, on GCE, at least get the instance's main service account.
if opts.GoogleAccessID == "" && metadata.OnGCE() {
mc := metadata.NewClient(nil)
opts.GoogleAccessID, _ = mc.Email("")
}
}
// Provide a default factory for SignBytes for environments without a private key.
if len(opts.PrivateKey) <= 0 && opts.GoogleAccessID != "" {
iam := new(credentialsClient)
// We cannot hold onto the first context: it might've been cancelled already.
ctx := context.Background()
opts.MakeSignBytes = iam.CreateMakeSignBytesWith(ctx, opts.GoogleAccessID)
}
client, err := gcp.NewHTTPClient(gcp.DefaultTransport(), creds.TokenSource)
if err != nil {
o.err = err
return
}
o.opener = &URLOpener{Client: client, Options: opts}
})
if o.err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, o.err)
}
return o.opener.OpenBucketURL(ctx, u)
}
// Scheme is the URL scheme gcsblob registers its URLOpener under on
// blob.DefaultMux.
const Scheme = "gs"
// URLOpener opens GCS URLs like "gs://mybucket".
//
// The URL host is used as the bucket name.
//
// The following query parameters are supported:
//
// - access_id: sets Options.GoogleAccessID
// - private_key_path: path to read for Options.PrivateKey
//
// Currently their use is limited to SignedURL.
type URLOpener struct {
// Client must be set to a non-nil HTTP client authenticated with
// Cloud Storage scope or equivalent.
Client *gcp.HTTPClient
// Options specifies the default options to pass to OpenBucket.
Options Options
}
// OpenBucketURL opens the GCS bucket with the same name as the URL's host.
func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {
opts, err := o.forParams(ctx, u.Query())
if err != nil {
return nil, fmt.Errorf("open bucket %v: %v", u, err)
}
return OpenBucket(ctx, o.Client, u.Host, opts)
}
func (o *URLOpener) forParams(ctx context.Context, q url.Values) (*Options, error) {
for k := range q {
if k != "access_id" && k != "private_key_path" {
return nil, fmt.Errorf("invalid query parameter %q", k)
}
}
opts := new(Options)
*opts = o.Options
if accessID := q.Get("access_id"); accessID != "" && accessID != opts.GoogleAccessID {
opts.GoogleAccessID = accessID
opts.PrivateKey = nil // Clear any previous key unrelated to the new accessID.
// Clear this as well to prevent calls with the old and mismatched accessID.
opts.MakeSignBytes = nil
}
if keyPath := q.Get("private_key_path"); keyPath != "" {
pk, err := ioutil.ReadFile(keyPath)
if err != nil {
return nil, err
}
opts.PrivateKey = pk
} else if _, exists := q["private_key_path"]; exists {
// A possible default value has been cleared by setting this to an empty value:
// The private key might have expired, or falling back to SignBytes/MakeSignBytes
// is intentional such as for tests or involving a key stored in a HSM/TPM.
opts.PrivateKey = nil
}
return opts, nil
}
// Options sets options for constructing a *blob.Bucket backed by GCS.
type Options struct {
// GoogleAccessID represents the authorizer for SignedURL.
// Required to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
GoogleAccessID string
// PrivateKey is the Google service account private key.
// Exactly one of PrivateKey or SignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
PrivateKey []byte
// SignBytes is a function for implementing custom signing.
// Exactly one of PrivateKey, SignBytes, or MakeSignBytes must be non-nil to use SignedURL.
// See https://godoc.org/cloud.google.com/go/storage#SignedURLOptions.
SignBytes func([]byte) ([]byte, error)
// MakeSignBytes is a factory for functions that are being used in place of an empty SignBytes.
// If your implementation of 'SignBytes' needs a request context, set this instead.
MakeSignBytes func(requestCtx context.Context) SignBytesFunc
}
// SignBytesFunc is shorthand for the signature of Options.SignBytes.
type SignBytesFunc func([]byte) ([]byte, error)
// openBucket returns a GCS Bucket that communicates using the given HTTP client.
func openBucket(ctx context.Context, client *gcp.HTTPClient, bucketName string, opts *Options) (*bucket, error) {
if client == nil {
return nil, errors.New("gcsblob.OpenBucket: client is required")
}
if bucketName == "" {
return nil, errors.New("gcsblob.OpenBucket: bucketName is required")
}
clientOpts := []option.ClientOption{option.WithHTTPClient(useragent.HTTPClient(&client.Client, "blob"))}
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
clientOpts = []option.ClientOption{
option.WithoutAuthentication(),
option.WithEndpoint("http://" + host + "/storage/v1/"),
option.WithHTTPClient(http.DefaultClient),
}
}
// We wrap the provided http.Client to add a Go CDK User-Agent.
c, err := storage.NewClient(ctx, clientOpts...)
if err != nil {
return nil, err
}
if opts == nil {
opts = &Options{}
}
return &bucket{name: bucketName, client: c, opts: opts}, nil
}
// OpenBucket returns a *blob.Bucket backed by an existing GCS bucket. See the
// package documentation for an example.
func OpenBucket(ctx context.Context, client *gcp.HTTPClient, bucketName string, opts *Options) (*blob.Bucket, error) {
drv, err := openBucket(ctx, client, bucketName, opts)
if err != nil {
return nil, err
}
return blob.NewBucket(drv), nil
}
// bucket represents a GCS bucket, which handles read, write and delete operations
// on objects within it.
type bucket struct {
name string
client *storage.Client
opts *Options
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// reader reads a GCS object. It implements driver.Reader.
type reader struct {
body io.ReadCloser
attrs driver.ReaderAttributes
raw *storage.Reader
}
func (r *reader) Read(p []byte) (int, error) {
return r.body.Read(p)
}
// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
return r.body.Close()
}
func (r *reader) Attributes() *driver.ReaderAttributes {
return &r.attrs
}
func (r *reader) As(i interface{}) bool {
p, ok := i.(**storage.Reader)
if !ok {
return false
}
*p = r.raw
return true
}
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
if err == storage.ErrObjectNotExist {
return gcerrors.NotFound
}
if gerr, ok := err.(*googleapi.Error); ok {
switch gerr.Code {
case http.StatusForbidden:
return gcerrors.PermissionDenied
case http.StatusNotFound:
return gcerrors.NotFound
case http.StatusPreconditionFailed:
return gcerrors.FailedPrecondition
case http.StatusTooManyRequests:
return gcerrors.ResourceExhausted
}
}
return gcerrors.Unknown
}
func (b *bucket) Close() error {
return nil
}
// ListPaged implements driver.ListPaged.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
bkt := b.client.Bucket(b.name)
query := &storage.Query{
Prefix: escapeKey(opts.Prefix),
Delimiter: escapeKey(opts.Delimiter),
}
if opts.BeforeList != nil {
asFunc := func(i interface{}) bool {
p, ok := i.(**storage.Query)
if !ok {
return false
}
*p = query
return true
}
if err := opts.BeforeList(asFunc); err != nil {
return nil, err
}
}
pageSize := opts.PageSize
if pageSize == 0 {
pageSize = defaultPageSize
}
iter := bkt.Objects(ctx, query)
pager := iterator.NewPager(iter, pageSize, string(opts.PageToken))
var objects []*storage.ObjectAttrs
nextPageToken, err := pager.NextPage(&objects)
if err != nil {
return nil, err
}
page := driver.ListPage{NextPageToken: []byte(nextPageToken)}
if len(objects) > 0 {
page.Objects = make([]*driver.ListObject, len(objects))
for i, obj := range objects {
toCopy := obj
asFunc := func(val interface{}) bool {
p, ok := val.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *toCopy
return true
}
if obj.Prefix == "" {
// Regular blob.
page.Objects[i] = &driver.ListObject{
Key: unescapeKey(obj.Name),
ModTime: obj.Updated,
Size: obj.Size,
MD5: obj.MD5,
AsFunc: asFunc,
}
} else {
// "Directory".
page.Objects[i] = &driver.ListObject{
Key: unescapeKey(obj.Prefix),
IsDir: true,
AsFunc: asFunc,
}
}
}
// GCS always returns "directories" at the end; sort them.
sort.Slice(page.Objects, func(i, j int) bool {
return page.Objects[i].Key < page.Objects[j].Key
})
}
return &page, nil
}
// As implements driver.As.
func (b *bucket) As(i interface{}) bool {
p, ok := i.(**storage.Client)
if !ok {
return false
}
*p = b.client
return true
}
// As implements driver.ErrorAs.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
switch v := err.(type) {
case *googleapi.Error:
if p, ok := i.(**googleapi.Error); ok {
*p = v
return true
}
}
return false
}
// Attributes implements driver.Attributes.
func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
attrs, err := obj.Attrs(ctx)
if err != nil {
return nil, err
}
return &driver.Attributes{
CacheControl: attrs.CacheControl,
ContentDisposition: attrs.ContentDisposition,
ContentEncoding: attrs.ContentEncoding,
ContentLanguage: attrs.ContentLanguage,
ContentType: attrs.ContentType,
Metadata: attrs.Metadata,
ModTime: attrs.Updated,
Size: attrs.Size,
MD5: attrs.MD5,
AsFunc: func(i interface{}) bool {
p, ok := i.(*storage.ObjectAttrs)
if !ok {
return false
}
*p = *attrs
return true
},
}, nil
}
// NewRangeReader implements driver.NewRangeReader.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
// Add an extra level of indirection so that BeforeRead can replace obj
// if needed. For example, ObjectHandle.If returns a new ObjectHandle.
// Also, make the Reader lazily in case this replacement happens.
objp := &obj
makeReader := func() (*storage.Reader, error) {
return (*objp).NewRangeReader(ctx, offset, length)
}
var r *storage.Reader
var rerr error
madeReader := false
if opts.BeforeRead != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(***storage.ObjectHandle); ok && !madeReader {
*p = objp
return true
}
if p, ok := i.(**storage.Reader); ok {
if !madeReader {
r, rerr = makeReader()
madeReader = true
if r == nil {
return false
}
}
*p = r
return true
}
return false
}
if err := opts.BeforeRead(asFunc); err != nil {
return nil, err
}
}
if !madeReader {
r, rerr = makeReader()
}
if rerr != nil {
return nil, rerr
}
return &reader{
body: r,
attrs: driver.ReaderAttributes{
ContentType: r.Attrs.ContentType,
ModTime: r.Attrs.LastModified,
Size: r.Attrs.Size,
},
raw: r,
}, nil
}
// escapeKey does all required escaping for UTF-8 strings to work with GCS.
func escapeKey(key string) string {
return escape.HexEscape(key, func(r []rune, i int) bool {
switch {
// GCS doesn't handle these characters (determined via experimentation).
case r[i] == 10 || r[i] == 13:
return true
// For "../", escape the trailing slash.
case i > 1 && r[i] == '/' && r[i-1] == '.' && r[i-2] == '.':
return true
}
return false
})
}
// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
return escape.HexUnescape(key)
}
// NewTypedWriter implements driver.NewTypedWriter.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
// Add an extra level of indirection so that BeforeWrite can replace obj
// if needed. For example, ObjectHandle.If returns a new ObjectHandle.
// Also, make the Writer lazily in case this replacement happens.
objp := &obj
makeWriter := func() *storage.Writer {
w := (*objp).NewWriter(ctx)
w.CacheControl = opts.CacheControl
w.ContentDisposition = opts.ContentDisposition
w.ContentEncoding = opts.ContentEncoding
w.ContentLanguage = opts.ContentLanguage
w.ContentType = contentType
w.ChunkSize = bufferSize(opts.BufferSize)
w.Metadata = opts.Metadata
w.MD5 = opts.ContentMD5
return w
}
var w *storage.Writer
if opts.BeforeWrite != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(***storage.ObjectHandle); ok && w == nil {
*p = objp
return true
}
if p, ok := i.(**storage.Writer); ok {
if w == nil {
w = makeWriter()
}
*p = w
return true
}
return false
}
if err := opts.BeforeWrite(asFunc); err != nil {
return nil, err
}
}
if w == nil {
w = makeWriter()
}
return w, nil
}
// CopyObjectHandles holds the ObjectHandles for the destination and source
// of a Copy. It is used by the BeforeCopy As hook.
type CopyObjectHandles struct {
Dst, Src *storage.ObjectHandle
}
// Copy implements driver.Copy.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
dstKey = escapeKey(dstKey)
srcKey = escapeKey(srcKey)
bkt := b.client.Bucket(b.name)
// Add an extra level of indirection so that BeforeCopy can replace the
// dst or src ObjectHandles if needed.
// Also, make the Copier lazily in case this replacement happens.
handles := CopyObjectHandles{
Dst: bkt.Object(dstKey),
Src: bkt.Object(srcKey),
}
makeCopier := func() *storage.Copier {
return handles.Dst.CopierFrom(handles.Src)
}
var copier *storage.Copier
if opts.BeforeCopy != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(**CopyObjectHandles); ok && copier == nil {
*p = &handles
return true
}
if p, ok := i.(**storage.Copier); ok {
if copier == nil {
copier = makeCopier()
}
*p = copier
return true
}
return false
}
if err := opts.BeforeCopy(asFunc); err != nil {
return err
}
}
if copier == nil {
copier = makeCopier()
}
_, err := copier.Run(ctx)
return err
}
// Delete implements driver.Delete.
func (b *bucket) Delete(ctx context.Context, key string) error {
key = escapeKey(key)
bkt := b.client.Bucket(b.name)
obj := bkt.Object(key)
return obj.Delete(ctx)
}
func (b *bucket) SignedURL(ctx context.Context, key string, dopts *driver.SignedURLOptions) (string, error) {
numSigners := 0
if b.opts.PrivateKey != nil {
numSigners++
}
if b.opts.SignBytes != nil {
numSigners++
}
if b.opts.MakeSignBytes != nil {
numSigners++
}
if b.opts.GoogleAccessID == "" || numSigners != 1 {
return "", errors.New("to use SignedURL, you must call OpenBucket with a valid Options.GoogleAccessID and exactly one of Options.PrivateKey, Options.SignBytes, or Options.MakeSignBytes")
}
key = escapeKey(key)
opts := &storage.SignedURLOptions{
Expires: time.Now().Add(dopts.Expiry),
Method: dopts.Method,
ContentType: dopts.ContentType,
GoogleAccessID: b.opts.GoogleAccessID,
PrivateKey: b.opts.PrivateKey,
SignBytes: b.opts.SignBytes,
}
if b.opts.MakeSignBytes != nil {
opts.SignBytes = b.opts.MakeSignBytes(ctx)
}
return storage.SignedURL(b.name, key, opts)
}
func bufferSize(size int) int {
if size == 0 {
return googleapi.DefaultUploadChunkSize
} else if size > 0 {
return size
}
return 0 // disable buffering
}
|
[
"\"STORAGE_EMULATOR_HOST\"",
"\"STORAGE_EMULATOR_HOST\""
] |
[] |
[
"STORAGE_EMULATOR_HOST"
] |
[]
|
["STORAGE_EMULATOR_HOST"]
|
go
| 1 | 0 | |
tests/integration/operatorhub/cmd_link_test.go
|
package integration
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/openshift/odo/tests/helper"
)
var _ = Describe("odo link command tests for OperatorHub", func() {
var commonVar helper.CommonVar
BeforeEach(func() {
commonVar = helper.CommonBeforeEach()
})
AfterEach(func() {
helper.CommonAfterEach(commonVar)
})
Context("Operators are installed in the cluster", func() {
var redisOperator string
var redisCluster string
BeforeEach(func() {
// wait till odo can see that all operators installed by setup script in the namespace
odoArgs := []string{"catalog", "list", "services"}
operators := []string{"redis-operator"}
for _, operator := range operators {
helper.WaitForCmdOut("odo", odoArgs, 5, true, func(output string) bool {
return strings.Contains(output, operator)
})
}
commonVar.CliRunner.CreateSecret("redis-secret", "password", commonVar.Project)
list := helper.Cmd("odo", "catalog", "list", "services").ShouldPass().Out()
redisOperator = regexp.MustCompile(`redis-operator\.*[a-z][0-9]\.[0-9]\.[0-9]`).FindString(list)
redisCluster = fmt.Sprintf("%s/Redis", redisOperator)
})
When("a component and a service are deployed", func() {
var componentName string
var svcFullName string
BeforeEach(func() {
helper.CopyExample(filepath.Join("source", "nodejs"), commonVar.Context)
componentName = "cmp-" + helper.RandString(6)
helper.Cmd("odo", "create", "nodejs", componentName, "--context", commonVar.Context, "--project", commonVar.Project).ShouldPass()
helper.Cmd("odo", "config", "set", "Memory", "300M", "-f", "--context", commonVar.Context).ShouldPass()
serviceName := "service-" + helper.RandString(6)
svcFullName = strings.Join([]string{"Redis", serviceName}, "/")
helper.Cmd("odo", "service", "create", redisCluster, serviceName, "--context", commonVar.Context).ShouldPass()
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
name := commonVar.CliRunner.GetRunningPodNameByComponent(componentName, commonVar.Project)
Expect(name).To(Not(BeEmpty()))
})
It("should find files in component container", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/project/server.js").ShouldPass()
})
When("a storage is added and deployed", func() {
BeforeEach(func() {
helper.Cmd("odo", "storage", "create", "--context", commonVar.Context).ShouldPass()
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
})
When("a link between the component and the service is created", func() {
BeforeEach(func() {
helper.Cmd("odo", "link", svcFullName, "--context", commonVar.Context).ShouldPass()
})
It("should run odo push successfully", func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
})
})
})
When("a link between the component and the service is created", func() {
BeforeEach(func() {
helper.Cmd("odo", "link", svcFullName, "--context", commonVar.Context).ShouldPass()
})
It("should find the link in odo describe", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).To(ContainSubstring(svcFullName))
})
When("odo push is executed", func() {
BeforeEach(func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
name := commonVar.CliRunner.GetRunningPodNameByComponent(componentName, commonVar.Project)
Expect(name).To(Not(BeEmpty()))
})
It("should find files in component container", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/project/server.js").ShouldPass()
})
It("should find the link environment variable", func() {
stdOut := helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "sh", "-c", "echo $REDIS_CLUSTERIP").ShouldPass().Out()
Expect(stdOut).To(Not(BeEmpty()))
})
It("should find the link in odo describe", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).To(ContainSubstring(svcFullName))
Expect(stdOut).To(ContainSubstring("Environment Variables"))
Expect(stdOut).To(ContainSubstring("REDIS_CLUSTERIP"))
})
It("should not list the service binding in `odo service list`", func() {
stdOut := helper.Cmd("odo", "service", "list", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).ToNot(ContainSubstring("ServiceBinding/"))
})
})
})
When("a link with between the component and the service is created with --bind-as-files", func() {
var bindingName string
BeforeEach(func() {
bindingName = "sbr-" + helper.RandString(6)
helper.Cmd("odo", "link", svcFullName, "--bind-as-files", "--name", bindingName, "--context", commonVar.Context).ShouldPass()
})
It("should display the link in odo describe", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).To(ContainSubstring(svcFullName))
})
When("odo push is executed", func() {
BeforeEach(func() {
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
name := commonVar.CliRunner.GetRunningPodNameByComponent(componentName, commonVar.Project)
Expect(name).To(Not(BeEmpty()))
})
It("should find files in component container", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/project/server.js").ShouldPass()
})
It("should find bindings for service", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/bindings/"+bindingName+"/clusterIP").ShouldPass()
})
It("should display the link in odo describe", func() {
stdOut := helper.Cmd("odo", "describe", "--context", commonVar.Context).ShouldPass().Out()
Expect(stdOut).To(ContainSubstring(svcFullName))
Expect(stdOut).To(ContainSubstring("Files"))
Expect(stdOut).To(ContainSubstring("/bindings/" + bindingName + "/clusterIP"))
})
})
})
})
When("getting sources, a devfile defining a component, a service and a link, and executing odo push", func() {
BeforeEach(func() {
componentName := "api" // this is the name of the component in the devfile
helper.CopyExample(filepath.Join("source", "nodejs"), commonVar.Context)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile-with-link.yaml"), filepath.Join(commonVar.Context, "devfile.yaml"))
helper.Cmd("odo", "create", componentName, "--project", commonVar.Project, "--context", commonVar.Context).ShouldPass()
helper.Cmd("odo", "push", "--context", commonVar.Context).ShouldPass()
name := commonVar.CliRunner.GetRunningPodNameByComponent(componentName, commonVar.Project)
Expect(name).To(Not(BeEmpty()))
})
It("should find files in component container", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/project/server.js").ShouldPass()
})
It("should find bindings for service", func() {
helper.Cmd("odo", "exec", "--context", commonVar.Context, "--", "ls", "/bindings/redis-link/clusterIP").ShouldPass()
})
It("should find owner references on link and service", func() {
if os.Getenv("KUBERNETES") == "true" {
Skip("This is a OpenShift specific scenario, skipping")
}
args := []string{"get", "servicebinding", "redis-link", "-o", "jsonpath='{.metadata.ownerReferences.*.name}'", "-n", commonVar.Project}
commonVar.CliRunner.WaitForRunnerCmdOut(args, 1, true, func(output string) bool {
return strings.Contains(output, "api-app")
})
args = []string{"get", "redis.redis.redis.opstreelabs.in", "myredis", "-o", "jsonpath='{.metadata.ownerReferences.*.name}'", "-n", commonVar.Project}
commonVar.CliRunner.WaitForRunnerCmdOut(args, 1, true, func(output string) bool {
return strings.Contains(output, "api-app")
})
})
})
})
When("one component is deployed", func() {
var context0 string
var cmp0 string
BeforeEach(func() {
context0 = helper.CreateNewContext()
cmp0 = helper.RandString(5)
helper.Cmd("odo", "create", "nodejs", cmp0, "--context", context0).ShouldPass()
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context0)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfile.yaml"), filepath.Join(context0, "devfile.yaml"))
helper.Cmd("odo", "push", "--context", context0).ShouldPass()
})
AfterEach(func() {
helper.Cmd("odo", "delete", "-f", "--context", context0).ShouldPass()
helper.DeleteDir(context0)
})
It("should fail when linking to itself", func() {
stdOut := helper.Cmd("odo", "link", cmp0, "--context", context0).ShouldFail().Err()
helper.MatchAllInOutput(stdOut, []string{cmp0, "cannot be linked with itself"})
})
It("should fail if the component doesn't exist and the service name doesn't adhere to the <service-type>/<service-name> format", func() {
helper.Cmd("odo", "link", "Redis").ShouldFail()
helper.Cmd("odo", "link", "Redis/").ShouldFail()
helper.Cmd("odo", "link", "/redis-standalone").ShouldFail()
})
When("another component is deployed", func() {
var context1 string
var cmp1 string
BeforeEach(func() {
context1 = helper.CreateNewContext()
cmp1 = helper.RandString(5)
helper.Cmd("odo", "create", "nodejs", cmp1, "--context", context1).ShouldPass()
helper.CopyExample(filepath.Join("source", "devfiles", "nodejs", "project"), context1)
helper.CopyExampleDevFile(filepath.Join("source", "devfiles", "nodejs", "devfileNestedCompCommands.yaml"), filepath.Join(context1, "devfile.yaml"))
helper.Cmd("odo", "push", "--context", context1).ShouldPass()
})
AfterEach(func() {
helper.Cmd("odo", "delete", "-f", "--context", context1).ShouldPass()
helper.DeleteDir(context1)
})
It("should link the two components successfully with service binding operator", func() {
if os.Getenv("KUBERNETES") == "true" {
// service binding operator is not installed on kubernetes
Skip("This is a OpenShift specific scenario, skipping")
}
helper.Cmd("odo", "link", cmp1, "--context", context0).ShouldPass()
helper.Cmd("odo", "push", "--context", context0).ShouldPass()
// check the link exists with the specific name
ocArgs := []string{"get", "servicebinding", strings.Join([]string{cmp0, cmp1}, "-"), "-o", "jsonpath='{.status.secret}'", "-n", commonVar.Project}
helper.WaitForCmdOut("oc", ocArgs, 1, true, func(output string) bool {
return strings.Contains(output, strings.Join([]string{cmp0, cmp1}, "-"))
})
// delete the link and undeploy it
helper.Cmd("odo", "unlink", cmp1, "--context", context0).ShouldPass()
helper.Cmd("odo", "push", "--context", context0).ShouldPass()
commonVar.CliRunner.WaitAndCheckForTerminatingState("servicebinding", commonVar.Project, 1)
})
It("should link the two components successfully without service binding operator", func() {
if os.Getenv("KUBERNETES") != "true" {
// service binding operator is not installed on kubernetes
Skip("This is a Kubernetes specific scenario, skipping")
}
helper.Cmd("odo", "link", cmp1, "--context", context0).ShouldPass()
helper.Cmd("odo", "push", "--context", context0).ShouldPass()
// check the secrets exists with the specific name
secrets := commonVar.CliRunner.GetSecrets(commonVar.Project)
Expect(secrets).To(ContainSubstring(fmt.Sprintf("%v-%v", cmp0, cmp1)))
envFromValues := commonVar.CliRunner.GetEnvRefNames(cmp0, "app", commonVar.Project)
envFound := false
for i := range envFromValues {
if strings.Contains(envFromValues[i], fmt.Sprintf("%v-%v", cmp0, cmp1)) {
envFound = true
}
}
Expect(envFound).To(BeTrue())
// delete the link and undeploy it
helper.Cmd("odo", "unlink", cmp1, "--context", context0).ShouldPass()
helper.Cmd("odo", "push", "--context", context0).ShouldPass()
// check the secrets exists with the specific name
secrets = commonVar.CliRunner.GetSecrets(commonVar.Project)
Expect(secrets).NotTo(ContainSubstring(fmt.Sprintf("%v-%v", cmp0, cmp1)))
envFromValues = commonVar.CliRunner.GetEnvRefNames(cmp0, "app", commonVar.Project)
envFound = false
for i := range envFromValues {
if strings.Contains(envFromValues[i], fmt.Sprintf("%v-%v", cmp0, cmp1)) {
envFound = true
}
}
Expect(envFound).To(BeFalse())
})
})
})
})
|
[
"\"KUBERNETES\"",
"\"KUBERNETES\"",
"\"KUBERNETES\""
] |
[] |
[
"KUBERNETES"
] |
[]
|
["KUBERNETES"]
|
go
| 1 | 0 | |
pkg/remote/github/provider.go
|
package github
import (
"os"
"github.com/cloudskiff/driftctl/pkg/output"
"github.com/cloudskiff/driftctl/pkg/remote/terraform"
tf "github.com/cloudskiff/driftctl/pkg/terraform"
)
type GithubTerraformProvider struct {
*terraform.TerraformProvider
name string
version string
}
type githubConfig struct {
Token string
Owner string `cty:"owner"`
Organization string
}
func NewGithubTerraformProvider(version string, progress output.Progress, configDir string) (*GithubTerraformProvider, error) {
p := &GithubTerraformProvider{
version: version,
name: "github",
}
installer, err := tf.NewProviderInstaller(tf.ProviderConfig{
Key: p.name,
Version: version,
ConfigDir: configDir,
})
if err != nil {
return nil, err
}
tfProvider, err := terraform.NewTerraformProvider(installer, terraform.TerraformProviderConfig{
Name: p.name,
DefaultAlias: p.GetConfig().getDefaultOwner(),
GetProviderConfig: func(owner string) interface{} {
return githubConfig{
Owner: p.GetConfig().getDefaultOwner(),
}
},
}, progress)
if err != nil {
return nil, err
}
p.TerraformProvider = tfProvider
return p, err
}
func (c githubConfig) getDefaultOwner() string {
if c.Organization != "" {
return c.Organization
}
return c.Owner
}
func (p GithubTerraformProvider) GetConfig() githubConfig {
return githubConfig{
Token: os.Getenv("GITHUB_TOKEN"),
Owner: os.Getenv("GITHUB_OWNER"),
Organization: os.Getenv("GITHUB_ORGANIZATION"),
}
}
func (p *GithubTerraformProvider) Name() string {
return p.name
}
func (p *GithubTerraformProvider) Version() string {
return p.version
}
|
[
"\"GITHUB_TOKEN\"",
"\"GITHUB_OWNER\"",
"\"GITHUB_ORGANIZATION\""
] |
[] |
[
"GITHUB_OWNER",
"GITHUB_TOKEN",
"GITHUB_ORGANIZATION"
] |
[]
|
["GITHUB_OWNER", "GITHUB_TOKEN", "GITHUB_ORGANIZATION"]
|
go
| 3 | 0 | |
src/telemetry/logs.go
|
package telemetry
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
)
var url string
// Beautify the log message and send it to the DataDog service
func log(level string, message string, values map[string]string) {
if os.Getenv("PRODUCTION") != "" {
values["message"] = message
values["level"] = level
jsonValue, _ := json.Marshal(values)
res, err := http.Post(url, "application/json", bytes.NewBuffer(jsonValue))
if err != nil {
fmt.Println(err)
} else {
// Only close the body when the POST actually produced a response,
// avoiding a nil dereference on failure.
defer res.Body.Close()
}
}
fmt.Printf("[%s] - %s\n", level, message)
}
// Different "flavours" of log message to make it easier to separate
func Debug(message string, values map[string]string) {
go log("debug", message, values)
}
func Info(message string, values map[string]string) {
go log("info", message, values)
}
func Warn(message string, values map[string]string) {
go log("warn", message, values)
}
func Error(message string, values map[string]string) {
go log("error", message, values)
}
// Initialize the url variable with the DataDog API key found in .env or the environment variables.
func Init() {
url = fmt.Sprintf("https://http-intake.logs.datadoghq.com/v1/input/%s?ddsource=nodejs&service=asura", os.Getenv("DATADOG_API_KEY"))
}
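// A hedged usage sketch (not part of the original file); the import path is an
// assumption:
//
// telemetry.Init()
// telemetry.Info("server started", map[string]string{"port": "8080"})
//
// Without PRODUCTION set, entries only go to stdout; with it set, each entry is
// also POSTed asynchronously to the DataDog intake URL built in Init.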
|
[
"\"PRODUCTION\"",
"\"DATADOG_API_KEY\""
] |
[] |
[
"PRODUCTION",
"DATADOG_API_KEY"
] |
[]
|
["PRODUCTION", "DATADOG_API_KEY"]
|
go
| 2 | 0 | |
nox/virtualenv.py
|
# Copyright 2016 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import re
import shutil
import sys
import py
import nox.command
from nox.logger import logger
# Problematic environment variables that are stripped from all commands inside
# of a virtualenv. See https://github.com/theacodes/nox/issues/44
_BLACKLISTED_ENV_VARS = frozenset([
'PIP_RESPECT_VIRTUALENV',
'PIP_REQUIRE_VIRTUALENV',
'__PYVENV_LAUNCHER__',
])
class ProcessEnv(object):
"""A environment with a 'bin' directory and a set of 'env' vars."""
def __init__(self, bin=None, env=None):
self._bin = bin
self.env = os.environ.copy()
if env is not None:
self.env.update(env)
for key in _BLACKLISTED_ENV_VARS:
self.env.pop(key, None)
if self.bin:
self.env['PATH'] = ':'.join([self.bin, self.env.get('PATH')])
@property
def bin(self):
return self._bin
def locate_via_py(version):
"""Find the Python executable using the Windows launcher.
This is based on :pep:397 which details that executing
``py.exe -{version}`` should execute python with the requested
version. We then make the python process print out its full
executable path which we use as the location for the version-
specific Python interpreter.
Args:
version (str): The desired Python version.
Returns:
Optional[str]: The full executable path for the Python ``version``,
if it is found.
"""
script = 'import sys; print(sys.executable)'
py_exe = py.path.local.sysfind('py')
if py_exe is not None:
try:
return py_exe.sysexec('-' + version, '-c', script).strip()
except py.process.cmdexec.Error:
return None
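# A hedged usage sketch (not part of the original module): on a Windows host
# with the py.exe launcher installed, locate_via_py('3.6') executes
# `py -3.6 -c "import sys; print(sys.executable)"` and returns a path such as
# 'C:\\Python36\\python.exe'; it returns None if that version is not registered
# with the launcher. The example path is illustrative only.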
class VirtualEnv(ProcessEnv):
"""Virtualenv management class."""
def __init__(self, location, interpreter=None, reuse_existing=False):
self.location = os.path.abspath(location)
self.interpreter = interpreter
self.reuse_existing = reuse_existing
super(VirtualEnv, self).__init__()
def _clean_location(self):
"""Deletes any existing virtualenv"""
if os.path.exists(self.location):
if self.reuse_existing:
return False
else:
shutil.rmtree(self.location)
return True
@property
def _resolved_interpreter(self):
"""Return the interpreter, appropriately resolved for the platform.
Based heavily on tox's implementation (tox/interpreters.py).
"""
# If there is no assigned interpreter, then use the same one used by
# Nox.
if self.interpreter is None:
return sys.executable
# If this is just an X.Y or X.Y.Z string, stick `python` in front of it.
if re.match(r'^\d\.\d\.?\d?$', self.interpreter):
self.interpreter = 'python{}'.format(self.interpreter)
# Sanity check: We only need the rest of this behavior on Windows.
if platform.system() != 'Windows':
return self.interpreter
# We may have gotten a fully-qualified interpreter path (for someone
# _only_ testing on Windows); accept this.
if py.path.local.sysfind(self.interpreter):
return self.interpreter
# If this is a standard Unix "pythonX.Y" name, it should be found
# in a standard location in Windows, and if not, the py.exe launcher
# should be able to find it from the information in the registry.
match = re.match(r'^python(?P<ver>\d\.\d)$', self.interpreter)
if match:
version = match.group('ver')
# Ask the Python launcher to find the interpreter.
path_from_launcher = locate_via_py(version)
if path_from_launcher:
return path_from_launcher
# If we got this far, then we were unable to resolve the interpreter
# to an actual executable; raise an exception.
raise RuntimeError('Unable to locate Python interpreter "{}".'.format(
self.interpreter,
))
@property
def bin(self):
"""Returns the location of the virtualenv's bin folder."""
if platform.system() == 'Windows':
return os.path.join(self.location, 'Scripts')
else:
return os.path.join(self.location, 'bin')
def create(self):
"""Create the virtualenv."""
if not self._clean_location():
logger.debug(
'Re-using existing virtualenv at {}.'.format(self.location))
return False
cmd = [sys.executable, '-m', 'virtualenv', self.location]
if self.interpreter:
cmd.extend(['-p', self._resolved_interpreter])
logger.info(
'Creating virtualenv using {} in {}'.format(
os.path.basename(self._resolved_interpreter), self.location))
nox.command.run(cmd, silent=True, log=False)
return True
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
check.py
|
#!/usr/bin/env python3
#
# Copyright 2015 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
import unittest
from collections import OrderedDict
from scripts.test import asm2wasm
from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
from scripts.test import wasm_opt
if shared.options.interpreter:
print('[ using wasm interpreter at "%s" ]' % shared.options.interpreter)
assert os.path.exists(shared.options.interpreter), 'interpreter not found'
def get_changelog_version():
with open(os.path.join(shared.options.binaryen_root, 'CHANGELOG.md')) as f:
lines = f.readlines()
lines = [l for l in lines if len(l.split()) == 1]
lines = [l for l in lines if l.startswith('v')]
version = lines[0][1:]
print("Parsed CHANGELOG.md version: %s" % version)
return int(version)
def run_help_tests():
print('[ checking --help is useful... ]\n')
not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
executables = sorted(executables)
assert len(executables)
for e in executables:
print('.. %s --help' % e)
out, err = subprocess.Popen([e, '--help'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
assert os.path.basename(e).replace('.exe', '') in out, 'Expected help to contain program name, got:\n%s' % out
assert len(out.split('\n')) > 8, 'Expected some help, got:\n%s' % out
print('[ checking --version ... ]\n')
changelog_version = get_changelog_version()
for e in executables:
print('.. %s --version' % e)
out, err = subprocess.Popen([e, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
assert os.path.basename(e).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
parts = out.split()
assert parts[1] == 'version'
version = int(parts[2])
assert version == changelog_version
def run_wasm_dis_tests():
print('\n[ checking wasm-dis on provided binaries... ]\n')
for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_DIS + [t]
if os.path.isfile(t + '.map'):
cmd += ['--source-map', t + '.map']
actual = support.run_command(cmd)
shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')
# also verify there are no validation errors
def check():
cmd = shared.WASM_OPT + [t, '-all']
support.run_command(cmd)
shared.with_pass_debug(check)
shared.validate_binary(t)
def run_crash_tests():
print("\n[ checking we don't crash on tricky inputs... ]\n")
for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t]
# expect a parse error to be reported
support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
def run_dylink_tests():
print("\n[ we emit dylink sections properly... ]\n")
dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
for t in sorted(dylink_tests):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t, '-o', 'a.wasm']
support.run_command(cmd)
with open('a.wasm', 'rb') as output:
index = output.read().find(b'dylink')
print(' ', index)
assert index == 11, 'dylink section must be first, right after the magic number etc.'
def run_ctor_eval_tests():
print('\n[ checking wasm-ctor-eval... ]\n')
for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
ctors = open(t + '.ctors').read().strip()
cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S', '--ctors', ctors]
support.run_command(cmd)
actual = open('a.wat').read()
out = t + '.out'
shared.fail_if_not_identical_to_file(actual, out)
def run_wasm_metadce_tests():
print('\n[ checking wasm-metadce ]\n')
for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
graph = t + '.graph.txt'
cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wat', '-S', '-all']
stdout = support.run_command(cmd)
expected = t + '.dced'
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_wasm_reduce_tests():
if not shared.has_shell_timeout():
print('\n[ skipping wasm-reduce testcases]\n')
return
print('\n[ checking wasm-reduce testcases]\n')
# fixed testcases
for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
print('..', os.path.basename(t))
# convert to wasm
support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm'])
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
expected = t + '.txt'
support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
# run on a nontrivial fuzz testcase, for general coverage
# this is very slow in ThreadSanitizer, so avoid it there
if 'fsanitize=thread' not in str(os.environ):
print('\n[ checking wasm-reduce fuzz testcase ]\n')
# TODO: re-enable multivalue once it is better optimized
support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
before = os.stat('a.wasm').st_size
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
after = os.stat('c.wasm').st_size
# This number is a custom threshold to check if we have shrunk the
# output sufficiently
assert after < 0.85 * before, [before, after]
def run_spec_tests():
print('\n[ checking wasm-shell spec testcases... ]\n')
for wast in shared.options.spec_tests:
print('..', os.path.basename(wast))
def run_spec_test(wast):
cmd = shared.WASM_SHELL + [wast]
output = support.run_command(cmd, stderr=subprocess.PIPE)
# filter out binaryen interpreter logging that the spec suite
# doesn't expect
filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
return '\n'.join(filtered) + '\n'
def run_opt_test(wast):
# check optimization validation
cmd = shared.WASM_OPT + [wast, '-O', '-all']
support.run_command(cmd)
def check_expected(actual, expected):
if expected and os.path.exists(expected):
expected = open(expected).read()
print(' (using expected output)')
actual = actual.strip()
expected = expected.strip()
if actual != expected:
shared.fail(actual, expected)
expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')
# some spec tests should fail (actual process failure, not just assert_invalid)
try:
actual = run_spec_test(wast)
except Exception as e:
if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
print('<< test failed as expected >>')
continue # don't try all the binary format stuff TODO
else:
shared.fail_with_error(str(e))
check_expected(actual, expected)
# skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
if 'exports.wast' in os.path.basename(wast): # FIXME
continue
# check binary format. here we can verify execution of the final
# result, no need for an output verification
# some wast files cannot be split:
# * comments.wast: contains characters that are not valid utf-8,
# so our string splitting code fails there
# FIXME Remove reference type tests from this list after nullref is
# implemented in V8
if os.path.basename(wast) not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
split_num = 0
actual = ''
for module, asserts in support.split_wast(wast):
print(' testing split module', split_num)
split_num += 1
support.write_wast('split.wast', module, asserts)
run_spec_test('split.wast') # before binary stuff - just check it's still ok split out
run_opt_test('split.wast') # also that our optimizer doesn't break on it
result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
# add the asserts, and verify that the test still passes
open(result_wast, 'a').write('\n' + '\n'.join(asserts))
actual += run_spec_test(result_wast)
# compare all the outputs to the expected output
check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
else:
# handle unsplittable wast files
run_spec_test(wast)
def run_validator_tests():
print('\n[ running validation tests... ]\n')
# Ensure the tests validate by default
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast')]
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast')]
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=none', os.path.join(shared.get_test_dir('validator'), 'invalid_return.wast')]
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_number.wast')]
support.run_command(cmd, expected_status=1)
def run_gcc_tests():
print('\n[ checking native gcc testcases...]\n')
if not shared.NATIVECC or not shared.NATIVEXX:
shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
return
for t in sorted(os.listdir(shared.get_test_dir('example'))):
output_file = 'example'
cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
if t.endswith('.txt'):
# check if there is a trace in the file, if so, we should build it
out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
if len(out) == 0:
print(' (no trace in ', t, ')')
continue
print(' (will check trace in ', t, ')')
src = 'trace.cpp'
with open(src, 'wb') as o:
o.write(out)
expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
else:
src = os.path.join(shared.get_test_dir('example'), t)
expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
if src.endswith(('.c', '.cpp')):
# build the C file separately
libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
'-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
if src.endswith('.cpp'):
extra += ['-std=c++14']
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
extra.append(f)
print('build: ', ' '.join(extra))
subprocess.check_call(extra)
# Link against the binaryen C library DSO, using an executable-relative rpath
cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
else:
continue
print(' ', t, src, expected)
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
cmd.append(f)
cmd = [shared.NATIVEXX, '-std=c++14'] + cmd
print('link: ', ' '.join(cmd))
subprocess.check_call(cmd)
print('run...', output_file)
actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
os.remove(output_file)
shared.fail_if_not_identical_to_file(actual, expected)
def run_unittest():
print('\n[ checking unit tests...]\n')
# equivalent to `python -m unittest discover -s ./test -v`
suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
shared.num_failures += len(result.errors) + len(result.failures)
if shared.options.abort_on_first_failure and shared.num_failures:
raise Exception("unittest failed")
TEST_SUITES = OrderedDict([
('help-messages', run_help_tests),
('wasm-opt', wasm_opt.test_wasm_opt),
('asm2wasm', asm2wasm.test_asm2wasm),
('asm2wasm-binary', asm2wasm.test_asm2wasm_binary),
('wasm-dis', run_wasm_dis_tests),
('crash', run_crash_tests),
('dylink', run_dylink_tests),
('ctor-eval', run_ctor_eval_tests),
('wasm-metadce', run_wasm_metadce_tests),
('wasm-reduce', run_wasm_reduce_tests),
('spec', run_spec_tests),
('lld', lld.test_wasm_emscripten_finalize),
('wasm2js', wasm2js.test_wasm2js),
('validator', run_validator_tests),
('gcc', run_gcc_tests),
('unit', run_unittest),
('binaryenjs', binaryenjs.test_binaryen_js),
('binaryenjs_wasm', binaryenjs.test_binaryen_wasm),
])
# Run all the tests
def main():
all_suites = TEST_SUITES.keys()
skip_by_default = ['binaryenjs', 'binaryenjs_wasm']
if shared.options.list_suites:
for suite in all_suites:
print(suite)
return 0
for r in shared.requested:
if r not in all_suites:
print('invalid test suite: %s (see --list-suites)\n' % r)
return 1
if not shared.requested:
shared.requested = [s for s in all_suites if s not in skip_by_default]
for test in shared.requested:
TEST_SUITES[test]()
# Check/display the results
if shared.num_failures == 0:
print('\n[ success! ]')
if shared.warnings:
print('\n' + '\n'.join(shared.warnings))
if shared.num_failures > 0:
print('\n[ ' + str(shared.num_failures) + ' failures! ]')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"COMPILER_FLAGS"
] |
[]
|
["COMPILER_FLAGS"]
|
python
| 1 | 0 | |
models/pointnet_util_current.py
|
"""
Author: Benny
Date: Nov 2019
"""
from data_utils.ModelNetDataLoader import ModelNetDataLoader
import argparse
import numpy as np
import os
import torch
import logging
from tqdm import tqdm
import sys
import importlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('PointNet')
parser.add_argument('--batch_size', type=int, default=24, help='batch size in training')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--log_dir', type=str, default='pointnet2_ssg_normal', help='Experiment root')
parser.add_argument('--normal', action='store_true', default=True, help='Whether to use normal information [default: True]')
parser.add_argument('--num_votes', type=int, default=3, help='Aggregate classification scores with voting [default: 3]')
return parser.parse_args()
def test(model, loader, num_class=40, vote_num=1):
mean_correct = []
class_acc = np.zeros((num_class,3))
for j, data in tqdm(enumerate(loader), total=len(loader)):
points, target = data
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
classifier = model.eval()
vote_pool = torch.zeros(target.size()[0],num_class).cuda()
for _ in range(vote_num):
pred, _ = classifier(points)
vote_pool += pred
pred = vote_pool/vote_num
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum()
class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0])
class_acc[cat,1]+=1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item()/float(points.size()[0]))
class_acc[:,2] = class_acc[:,0]/ class_acc[:,1]
class_acc = np.mean(class_acc[:,2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
experiment_dir = 'log/classification/' + args.log_dir
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
DATA_PATH = 'data/modelnet40_normal_resampled/'
TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
'''MODEL LOADING'''
num_class = 40
model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0]
MODEL = importlib.import_module(model_name)
classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
classifier.load_state_dict(checkpoint['model_state_dict'])
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes)
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
if __name__ == '__main__':
args = parse_args()
main(args)
import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
import time as Time
def timeit(tag, t):
print("{}: {}s".format(tag, time() - t))
return time()
def pc_normalize(pc):
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def square_distance(src, dst):
"""
Calculate Euclid distance between each two points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
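# A hedged sanity check (not part of the original file): the expansion in the
# docstring should agree with a brute-force pairwise distance, e.g.
# src = torch.rand(2, 5, 3); dst = torch.rand(2, 7, 3)
# expanded = square_distance(src, dst)
# brute = ((src[:, :, None, :] - dst[:, None, :, :]) ** 2).sum(-1)
# torch.allclose(expanded, brute, atol=1e-5)  # expected: True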
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def binary_search(sorted_points, origin, margin):
left = min(origin, margin)
right = max(origin, margin)
expected = (float(sorted_points[left]) + float(sorted_points[right]))/2
center = int((left+right)/2)
while(left < right):
center = int((left+right)/2)
if sorted_points[center] == expected:
break
if sorted_points[center] < expected:
left = center+1
else:
right = center-1
return center
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B,N,C = xyz.shape
xyz = torch.squeeze(xyz)
xyz = (xyz + 1)*100
xyz = xyz.int()
Start_FPS = Time.time()
sum_dims = torch.sum(xyz, -1)
sorted_points , indices = torch.sort(sum_dims, -1)
sorted_points = sorted_points.numpy()
indices = indices.numpy()
initial_point = np.random.randint(1, N-2)
centroids = []
scores = np.zeros(N)
centroids.append(initial_point)
candidates = set()
candidates.add(0)
candidates.add(N-1)
scores[0] = np.abs(sorted_points[initial_point]- sorted_points[0])
scores[-1] = np.abs(sorted_points[initial_point]- sorted_points[-1])
for i in range(npoint - 1):
selected_centroid = np.argmax(scores)
scores[selected_centroid] = 0
centroids.append(selected_centroid)
centroids = sorted(centroids)
candidates.remove(selected_centroid)
index = np.where(centroids==selected_centroid)[0][0]
if not index == (len(centroids)-1):
candidate_right = indices[binary_search(sorted_points,selected_centroid, centroids[index+1])]
if candidate_right in centroids:
R = candidate_right
while R in centroids and (R<N-1):
R+=1
L = candidate_right
while L in centroids and (L>0):
L-=1
L_score = abs(sorted_points[centroids[index+1]] - sorted_points[L])
R_score = abs(sorted_points[selected_centroid] - sorted_points[R])
if L_score > R_score or R==N-1:
candidate_right = L
if R_score > L_score or L==0:
candidate_right = R
candidates.add(candidate_right)
middle_score = sorted_points[candidate_right]
next_score = sorted_points[centroids[index+1]]
past_score = sorted_points[selected_centroid]
this_score = max(abs(next_score - middle_score), abs(middle_score-past_score))
scores[candidate_right] = this_score
if not index == 0:
candidate_left = indices[binary_search(sorted_points, selected_centroid, centroids[index-1])]
if candidate_left in centroids:
R = candidate_left
while (R in centroids) and (R < N-1):
R+=1
L = candidate_left
while (L in centroids) and (L>0):
L-=1
L_score = abs(sorted_points[selected_centroid] - sorted_points[L])
R_score = abs(sorted_points[centroids[index-1]] - sorted_points[R])
if L_score > R_score or R==N-1:
candidate_left = L
if R_score > L_score or L==0:
candidate_left = R
middle_score = sorted_points[candidate_left]
next_score = sorted_points[selected_centroid]
past_score = sorted_points[centroids[index-1]]
this_score = max(abs(next_score - middle_score), abs(middle_score-past_score))
candidates.add(candidate_left)
scores[candidate_left] = this_score
final_centroids = []
for item in centroids:
final_centroids.append(indices[item])
final_centroids = np.array(final_centroids)
centroids = np.expand_dims(centroids, axis=0)
FPS_Time = Time.time() - Start_FPS
with open("FPS_Whole.txt", 'a') as f:
f.write(str(FPS_Time)+"\n")
return torch.tensor(centroids)
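# A hedged usage sketch (not part of the original file): this variant expects a
# CPU tensor of shape [1, N, 3] (it squeezes the batch dimension and calls
# .numpy() on intermediate results), e.g.
# pts = torch.rand(1, 2048, 3) * 2 - 1
# idx = farthest_point_sample(pts, 512)  # -> indices tensor of shape [1, 512]
# Note that it also appends timing measurements to FPS_Whole.txt in the working directory.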
def query_ball_point(radius, nsample, xyz, new_xyz):
start = Time.time()
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
#print(group_idx)
#print(group_idx.shape)
#print("group_idx")
#raise(False)
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
with open("query_ball_point.txt", 'a') as f:
f.write(str(Time.time() - start)+"\n")
#print("query ball point", Time.time() - start, "\n")
return group_idx
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
"""
Input:
npoint:
radius:
nsample:
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, npoint, nsample, 3]
new_points: sampled points data, [B, npoint, nsample, 3+D]
"""
B, N, C = xyz.shape
S = npoint
fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint, C]
torch.cuda.empty_cache()
new_xyz = index_points(xyz, fps_idx)
torch.cuda.empty_cache()
idx = query_ball_point(radius, nsample, xyz, new_xyz)
torch.cuda.empty_cache()
grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
torch.cuda.empty_cache()
grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)
torch.cuda.empty_cache()
if points is not None:
grouped_points = index_points(points, idx)
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]
else:
new_points = grouped_xyz_norm
if returnfps:
return new_xyz, new_points, grouped_xyz, fps_idx
else:
return new_xyz, new_points
def sample_and_group_all(xyz, points):
"""
Input:
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, 1, 3]
new_points: sampled points data, [B, 1, N, 3+D]
"""
device = xyz.device
B, N, C = xyz.shape
new_xyz = torch.zeros(B, 1, C).to(device)
grouped_xyz = xyz.view(B, 1, N, C)
if points is not None:
new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
else:
new_points = grouped_xyz
return new_xyz, new_points
class PointNetSetAbstraction(nn.Module):
def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
super(PointNetSetAbstraction, self).__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.group_all = group_all
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1)
if points is not None:
points = points.permute(0, 2, 1)
if self.group_all:
new_xyz, new_points = sample_and_group_all(xyz, points)
else:
new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)
# new_xyz: sampled points position data, [B, npoint, C]
# new_points: sampled points data, [B, npoint, nsample, C+D]
new_points = new_points.permute(0, 3, 2, 1) # [B, C+D, nsample,npoint]
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points = torch.max(new_points, 2)[0]
new_xyz = new_xyz.permute(0, 2, 1)
return new_xyz, new_points
class PointNetSetAbstractionMsg(nn.Module):
def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
super(PointNetSetAbstractionMsg, self).__init__()
self.npoint = npoint
self.radius_list = radius_list
self.nsample_list = nsample_list
self.conv_blocks = nn.ModuleList()
self.bn_blocks = nn.ModuleList()
for i in range(len(mlp_list)):
convs = nn.ModuleList()
bns = nn.ModuleList()
last_channel = in_channel + 3
for out_channel in mlp_list[i]:
convs.append(nn.Conv2d(last_channel, out_channel, 1))
bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.conv_blocks.append(convs)
self.bn_blocks.append(bns)
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1)
if points is not None:
points = points.permute(0, 2, 1)
B, N, C = xyz.shape
S = self.npoint
new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
new_points_list = []
for i, radius in enumerate(self.radius_list):
K = self.nsample_list[i]
group_idx = query_ball_point(radius, K, xyz, new_xyz)
grouped_xyz = index_points(xyz, group_idx)
grouped_xyz -= new_xyz.view(B, S, 1, C)
if points is not None:
grouped_points = index_points(points, group_idx)
grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)
else:
grouped_points = grouped_xyz
grouped_points = grouped_points.permute(0, 3, 2, 1) # [B, D, K, S]
for j in range(len(self.conv_blocks[i])):
conv = self.conv_blocks[i][j]
bn = self.bn_blocks[i][j]
grouped_points = F.relu(bn(conv(grouped_points)))
new_points = torch.max(grouped_points, 2)[0] # [B, D', S]
new_points_list.append(new_points)
new_xyz = new_xyz.permute(0, 2, 1)
new_points_concat = torch.cat(new_points_list, dim=1)
return new_xyz, new_points_concat
class PointNetFeaturePropagation(nn.Module):
def __init__(self, in_channel, mlp):
super(PointNetFeaturePropagation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
last_channel = out_channel
def forward(self, xyz1, xyz2, points1, points2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N]
"""
xyz1 = xyz1.permute(0, 2, 1)
xyz2 = xyz2.permute(0, 2, 1)
points2 = points2.permute(0, 2, 1)
B, N, C = xyz1.shape
_, S, _ = xyz2.shape
if S == 1:
interpolated_points = points2.repeat(1, N, 1)
else:
dists = square_distance(xyz1, xyz2)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dist_recip = 1.0 / (dists + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
if points1 is not None:
points1 = points1.permute(0, 2, 1)
new_points = torch.cat([points1, interpolated_points], dim=-1)
else:
new_points = interpolated_points
new_points = new_points.permute(0, 2, 1)
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
return new_points
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
malaya/tatabahasa.py
|
from malaya.supervised import t5 as t5_load
from malaya.supervised import transformer as load_transformer
from malaya.model.tf import Tatabahasa
from malaya.model.t5 import Tatabahasa as T5_Tatabahasa
from herpetologist import check_type
_transformer_tagging_availability = {
'small': {
'Size (MB)': 397,
'Quantized Size (MB)': 100,
'Sequence Accuracy': 0.860198,
'Sequence Tagging Accuracy': 0.96326745,
},
'base': {
'Size (MB)': 875,
'Quantized Size (MB)': 220,
'Sequence Accuracy': 0.938972,
'Sequence Tagging Accuracy': 0.977407,
},
}
_transformer_availability = {
't5': {
'Size (MB)': 1250,
'Quantized Size (MB)': 481,
'WER': 0.0178902,
},
'small-t5': {
'Size (MB)': 355.6,
'Quantized Size (MB)': 195,
'WER': 0.0187973,
},
'tiny-t5': {
'Size (MB)': 208,
'Quantized Size (MB)': 103,
'WER': 0.0328037,
},
'super-tiny-t5': {
'Size (MB)': 81.8,
'Quantized Size (MB)': 27.1,
'WER': 0.0351141,
},
'3x-super-tiny-t5': {
'Size (MB)': 18.3,
'Quantized Size (MB)': 4.46,
'WER': 0.03676189,
},
}
def describe_tagging():
"""
Describe kesalahan tatabahasa supported.
Full description at https://tatabahasabm.tripod.com/tata/salahtata.htm
"""
d = [{'class': 0,
'Description': 'PAD',
'salah': '',
'betul': ''},
{'class': 1,
'Description': 'kesambungan subwords',
'salah': '',
'betul': '',
},
{'class': 2,
'Description': 'tiada kesalahan',
'salah': '',
'betul': '',
},
{'class': 3,
'Description': 'kesalahan frasa nama, Perkara yang diterangkan mesti mendahului "penerang"',
'salah': 'Cili sos',
'betul': 'sos cili',
},
{'class': 4,
'Description': 'kesalahan kata jamak',
'salah': 'mereka-mereka',
'betul': 'mereka',
},
{'class': 5,
'Description': 'kesalahan kata penguat',
'salah': 'sangat tinggi sekali',
'betul': 'sangat tinggi',
},
{'class': 6,
'Description': 'kata adjektif dan imbuhan "ter" tanpa penguat.',
'salah': 'Sani mendapat markah yang tertinggi sekali.',
'betul': 'Sani mendapat markah yang tertinggi.',
},
{'class': 7,
'Description': 'kesalahan kata hubung',
'salah': 'Sally sedang membaca bila saya tiba di rumahnya.',
'betul': 'Sally sedang membaca apabila saya tiba di rumahnya.',
},
{'class': 8,
'Description': 'kesalahan kata bilangan',
'salah': 'Beribu peniaga tidak membayar cukai pendapatan.',
'betul': 'Beribu-ribu peniaga tidak membayar cukai pendapatan',
},
{'class': 9,
'Description': 'kesalahan kata sendi',
'salah': 'Umar telah berpindah daripada sekolah ini bulan lalu.',
'betul': 'Umar telah berpindah dari sekolah ini bulan lalu.',
},
{'class': 10,
'Description': 'kesalahan penjodoh bilangan',
'salah': 'Setiap orang pelajar',
'betul': 'Setiap pelajar.',
},
{'class': 11,
'Description': 'kesalahan kata ganti diri',
'salah': 'Pencuri itu telah ditangkap. Beliau dibawa ke balai polis.',
'betul': 'Pencuri itu telah ditangkap. Dia dibawa ke balai polis.',
},
{'class': 12,
'Description': 'kesalahan ayat pasif',
'salah': 'Cerpen itu telah dikarang oleh saya.',
'betul': 'Cerpen itu telah saya karang.',
},
{'class': 13,
'Description': 'kesalahan kata tanya',
'salah': 'Kamu berasal dari manakah ?',
'betul': 'Kamu berasal dari mana ?',
},
{'class': 14,
'Description': 'kesalahan tanda baca',
'salah': 'Kamu berasal dari manakah .',
'betul': 'Kamu berasal dari mana ?',
},
{'class': 15,
'Description': 'kesalahan kata kerja tak transitif',
'salah': 'Dia kata kepada saya',
'betul': 'Dia berkata kepada saya',
},
{'class': 16,
'Description': 'kesalahan kata kerja transitif',
'salah': 'Dia suka baca buku',
'betul': 'Dia suka membaca buku',
},
{'class': 17,
'Description': 'penggunaan kata yang tidak tepat',
'salah': 'Tembuk Besar negeri Cina dibina oleh Shih Huang Ti.',
'betul': 'Tembok Besar negeri Cina dibina oleh Shih Huang Ti',
},
]
from malaya.function import describe_availability
return describe_availability(d, transpose=False)
def available_transformer_tagging():
"""
List available transformer tagging models.
"""
from malaya.function import describe_availability
return describe_availability(
_transformer_tagging_availability,
text='tested on 10k kesalahan tatabahasa texts.',
)
def available_transformer():
"""
List available transformer models.
"""
from malaya.function import describe_availability
return describe_availability(
_transformer_availability,
text='tested on 7.5k kesalahan tatabahasa texts.',
)
@check_type
def transformer(model: str = 'small-t5', quantized: bool = False, **kwargs):
"""
Load Malaya transformer encoder-decoder model to correct a `kesalahan tatabahasa` text.
Parameters
----------
model : str, optional (default='small-t5')
Model architecture supported. Allowed values:
* ``'t5'`` - T5 BASE parameters.
* ``'small-t5'`` - T5 SMALL parameters.
* ``'tiny-t5'`` - T5 TINY parameters.
* ``'super-tiny-t5'`` - T5 SUPER TINY parameters.
* ``'3x-super-tiny-t5'`` - T5 3X SUPER TINY parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
A quantized model is not necessarily faster; it depends entirely on the machine.
Returns
-------
result: model
List of model classes:
* if `t5` in model, will return `malaya.model.t5.Tatabahasa`.
"""
model = model.lower()
if model not in _transformer_availability:
raise ValueError(
'model not supported, please check supported models from `malaya.tatabahasa.available_transformer()`.'
)
return t5_load.load(
module='kesalahan-tatabahasa',
model=model,
model_class=T5_Tatabahasa,
quantized=quantized,
**kwargs,
)
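# A hedged usage sketch (not part of the original module); the correction
# method on the returned object is an assumption and should be checked against
# the malaya documentation:
# import malaya
# model = malaya.tatabahasa.transformer(model='small-t5', quantized=True)
# model.greedy_decoder(['Sani mendapat markah yang tertinggi sekali.'])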
@check_type
def transformer_tagging(model: str = 'base', quantized: bool = False, **kwargs):
"""
Load Malaya transformer encoder-decoder + tagging model to correct a `kesalahan tatabahasa` text.
Parameters
----------
model : str, optional (default='base')
Model architecture supported. Allowed values:
* ``'small'`` - Malaya Transformer Tag SMALL parameters.
* ``'base'`` - Malaya Transformer Tag BASE parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
A quantized model is not necessarily faster; it depends entirely on the machine.
Returns
-------
result: malaya.model.tf.Tatabahasa class
"""
model = model.lower()
if model not in _transformer_tagging_availability:
raise ValueError(
'model not supported, please check supported models from `malaya.tatabahasa.available_transformer_tagging()`.'
)
return load_transformer.load_tatabahasa(
module='kesalahan-tatabahasa',
model=model,
model_class=Tatabahasa,
quantized=quantized,
**kwargs
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
handler/api/repos/enable.go
|
// Copyright 2019 Drone IO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package repos
import (
"net/http"
"os"
"github.com/drone/drone/core"
"github.com/drone/drone/handler/api/render"
"github.com/drone/drone/handler/api/request"
"github.com/drone/drone/logger"
"github.com/dchest/uniuri"
"github.com/go-chi/chi"
)
// FEATURE FLAG enables a static secret value used to sign
// incoming requests routed through a proxy. This was implemented
// based on feedback from @chiraggadasc and should not be
// removed until we have a permanent solution in place.
var staticSigner = os.Getenv("DRONE_FEATURE_SERVER_PROXY_SECRET")
// HandleEnable returns an http.HandlerFunc that processes http
// requests to enable a repository in the system.
func HandleEnable(
hooks core.HookService,
repos core.RepositoryStore,
sender core.WebhookSender,
) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var (
owner = chi.URLParam(r, "owner")
name = chi.URLParam(r, "name")
)
user, _ := request.UserFrom(r.Context())
repo, err := repos.FindName(r.Context(), owner, name)
if err != nil {
render.NotFound(w, err)
logger.FromRequest(r).
WithError(err).
WithField("namespace", owner).
WithField("name", name).
Debugln("api: repository not found")
return
}
repo.Active = true
repo.UserID = user.ID
if repo.Config == "" {
repo.Config = ".drone.yml"
}
if repo.Signer == "" {
repo.Signer = uniuri.NewLen(32)
}
if repo.Secret == "" {
repo.Secret = uniuri.NewLen(32)
}
if repo.Timeout == 0 {
repo.Timeout = 60
}
if staticSigner != "" {
repo.Signer = staticSigner
}
err = hooks.Create(r.Context(), user, repo)
if err != nil {
render.InternalError(w, err)
logger.FromRequest(r).
WithError(err).
WithField("namespace", owner).
WithField("name", name).
Debugln("api: cannot create or update hook")
return
}
err = repos.Activate(r.Context(), repo)
if err == core.ErrRepoLimit {
render.ErrorCode(w, err, 402)
logger.FromRequest(r).
WithError(err).
WithField("namespace", owner).
WithField("name", name).
Errorln("api: cannot activate repository")
return
}
if err != nil {
render.InternalError(w, err)
logger.FromRequest(r).
WithError(err).
WithField("namespace", owner).
WithField("name", name).
Debugln("api: cannot activate repository")
return
}
err = sender.Send(r.Context(), &core.WebhookData{
Event: core.WebhookEventRepo,
Action: core.WebhookActionEnabled,
User: user,
Repo: repo,
})
if err != nil {
logger.FromRequest(r).
WithError(err).
WithField("namespace", owner).
WithField("name", name).
Warnln("api: cannot send webhook")
}
render.JSON(w, repo, 200)
}
}
|
[
"\"DRONE_FEATURE_SERVER_PROXY_SECRET\""
] |
[] |
[
"DRONE_FEATURE_SERVER_PROXY_SECRET"
] |
[]
|
["DRONE_FEATURE_SERVER_PROXY_SECRET"]
|
go
| 1 | 0 | |
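The `staticSigner` feature flag in the row above is read once at package initialisation and then overrides the per-repository signer on every enable request. The sketch below restates that pattern in Python purely as an illustration; the function and field names are invented and are not part of the Drone codebase.

import os
import secrets

# Read the proxy-secret feature flag once at import time, mirroring the Go package-level var.
STATIC_SIGNER = os.getenv("DRONE_FEATURE_SERVER_PROXY_SECRET", "")

def enable_repo(repo: dict) -> dict:
    # Fall back to a random per-repository signer when the flag is unset.
    repo.setdefault("signer", secrets.token_hex(16))
    if STATIC_SIGNER:
        repo["signer"] = STATIC_SIGNER
    return repo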
departure/cli/web.py
|
import os
import uvicorn
import click
@click.command()
def start():
uvicorn_reload = "UVICORN_RELOAD" in os.environ
uvicorn.run("departure.web.api_server:app", reload=uvicorn_reload, host="0.0.0.0")
if __name__ == "__main__":
start()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
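`departure/cli/web.py` treats the mere presence of `UVICORN_RELOAD` as truthy, so even `UVICORN_RELOAD=0` would enable reload. The snippet below is a hedged sketch of a stricter reading of the flag; it is an illustration of the pattern, not a proposed patch to the package.

import os

def reload_enabled(name: str = "UVICORN_RELOAD") -> bool:
    # Presence check as in the original, but common "false" spellings are rejected.
    value = os.environ.get(name)
    if value is None:
        return False
    return value.strip().lower() not in {"0", "false", "no", ""}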
main.go
|
package main
import (
"log"
"net/http"
"os"
"golang.org/x/net/webdav"
)
type methodMux map[string]http.Handler
func (m *methodMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if h, ok := (*m)[r.Method]; ok {
h.ServeHTTP(w, r)
} else {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
func main() {
listen := os.Getenv("LISTEN")
root := os.Getenv("ROOT")
prefix := os.Getenv("PREFIX")
files := http.StripPrefix(prefix, http.FileServer(http.Dir(root)))
webdav := &webdav.Handler{
Prefix: prefix,
FileSystem: webdav.Dir(root),
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
if err != nil {
log.Printf("r=%v err=%v", r, err)
}
},
}
loggedWebdav := logRequestHandler(webdav)
mux := methodMux(map[string]http.Handler{
"GET": logRequestHandler(files),
"OPTIONS": loggedWebdav,
"PROPFIND": loggedWebdav,
"PROPPATCH": loggedWebdav,
"MKCOL": loggedWebdav,
"COPY": loggedWebdav,
"MOVE": loggedWebdav,
"LOCK": loggedWebdav,
"UNLOCK": loggedWebdav,
"DELETE": loggedWebdav,
"PUT": loggedWebdav,
})
if err := http.ListenAndServe(listen, &mux); err != nil {
log.Fatal(err)
}
}
func logRequestHandler(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
// call the original http.Handler we're wrapping
h.ServeHTTP(w, r)
// gather information about request and log it
uri := r.URL.String()
method := r.Method
// ... more information
log.Printf("%s %s", method, uri)
}
// http.HandlerFunc wraps a function so that it
// implements http.Handler interface
return http.HandlerFunc(fn)
}
|
[
"\"LISTEN\"",
"\"ROOT\"",
"\"PREFIX\""
] |
[] |
[
"PREFIX",
"ROOT",
"LISTEN"
] |
[]
|
["PREFIX", "ROOT", "LISTEN"]
|
go
| 3 | 0 | |
vet.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
// vetCheck runs the "vet" tool on the source code in req.Body.
// In case of no errors it returns an empty, non-nil *response.
// Otherwise &response.Errors contains found errors.
//
// Deprecated: this is the handler for the legacy /vet endpoint; use
// the /compile (compileAndRun) handler instead with the WithVet
// boolean set. This code path doesn't support modules and only exists
// as a temporary compatibility bridge to older JavaScript clients.
func vetCheck(ctx context.Context, req *request) (*response, error) {
tmpDir, err := ioutil.TempDir("", "vet")
if err != nil {
return nil, fmt.Errorf("error creating temp directory: %v", err)
}
defer os.RemoveAll(tmpDir)
in := filepath.Join(tmpDir, progName)
if err := ioutil.WriteFile(in, []byte(req.Body), 0400); err != nil {
return nil, fmt.Errorf("error creating temp file %q: %v", in, err)
}
const useModules = false // legacy handler; no modules (see func comment)
vetOutput, err := vetCheckInDir(tmpDir, os.Getenv("GOPATH"), useModules)
if err != nil {
// This is about errors running vet, not vet returning output.
return nil, err
}
return &response{Errors: vetOutput}, nil
}
// vetCheckInDir runs go vet in the provided directory, using the
// provided GOPATH value, and whether modules are enabled. The
// returned error is only about whether go vet was able to run, not
// whether vet reported problems. The returned value is ("", nil) if
// vet successfully found nothing, and (non-empty, nil) if vet ran and
// found issues.
func vetCheckInDir(dir, goPath string, modules bool) (output string, execErr error) {
cmd := exec.Command("go", "vet")
if !modules {
cmd.Args = append(cmd.Args, progName)
}
cmd.Dir = dir
// Linux go binary is not built with CGO_ENABLED=0.
// Prevent vet from compiling packages in cgo mode.
// See #26307.
cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOPATH="+goPath)
if modules {
cmd.Env = append(cmd.Env,
"GO111MODULE=on",
"GOPROXY="+playgroundGoproxy(),
)
}
out, err := cmd.CombinedOutput()
if err == nil {
return "", nil
}
if _, ok := err.(*exec.ExitError); !ok {
return "", fmt.Errorf("error vetting go source: %v", err)
}
// Rewrite compiler errors to refer to progName
// instead of '/tmp/sandbox1234/main.go'.
errs := strings.Replace(string(out), dir, "", -1)
// Remove vet's package name banner.
if strings.HasPrefix(errs, "#") {
if nl := strings.Index(errs, "\n"); nl != -1 {
errs = errs[nl+1:]
}
}
return errs, nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
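`vetCheckInDir` above builds the child-process environment by appending `CGO_ENABLED=0` and a custom `GOPATH` to `os.Environ()` and reads vet's findings from the combined output. The Python sketch below shows the equivalent subprocess pattern; the working-directory handling and the `./...` package selector are illustrative assumptions rather than a translation of the playground code.

import os
import subprocess

def run_go_vet(workdir: str, gopath: str) -> str:
    # Copy the parent environment and override only what the child needs,
    # mirroring append(os.Environ(), "CGO_ENABLED=0", "GOPATH="+goPath).
    env = dict(os.environ, CGO_ENABLED="0", GOPATH=gopath)
    proc = subprocess.run(
        ["go", "vet", "./..."],
        cwd=workdir,
        env=env,
        capture_output=True,
        text=True,
    )
    # go vet reports diagnostics on stderr; an empty string means nothing was flagged.
    return proc.stderr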
Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift_test.go
|
package swift
import (
"io/ioutil"
"os"
"strconv"
"testing"
"github.com/ncw/swift/swifttest"
"github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/context"
storagedriver "github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver"
"github.com/emerald-ci/test-runner/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites"
"gopkg.in/check.v1"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) }
var swiftDriverConstructor func(prefix string) (*Driver, error)
func init() {
var (
username string
password string
authURL string
tenant string
tenantID string
domain string
domainID string
container string
region string
insecureSkipVerify bool
swiftServer *swifttest.SwiftServer
err error
)
username = os.Getenv("SWIFT_USERNAME")
password = os.Getenv("SWIFT_PASSWORD")
authURL = os.Getenv("SWIFT_AUTH_URL")
tenant = os.Getenv("SWIFT_TENANT_NAME")
tenantID = os.Getenv("SWIFT_TENANT_ID")
domain = os.Getenv("SWIFT_DOMAIN_NAME")
domainID = os.Getenv("SWIFT_DOMAIN_ID")
container = os.Getenv("SWIFT_CONTAINER_NAME")
region = os.Getenv("SWIFT_REGION_NAME")
insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY"))
if username == "" || password == "" || authURL == "" || container == "" {
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
panic(err)
}
username = "swifttest"
password = "swifttest"
authURL = swiftServer.AuthURL
container = "test"
}
prefix, err := ioutil.TempDir("", "driver-")
if err != nil {
panic(err)
}
defer os.Remove(prefix)
swiftDriverConstructor = func(root string) (*Driver, error) {
parameters := Parameters{
username,
password,
authURL,
tenant,
tenantID,
domain,
domainID,
region,
container,
root,
insecureSkipVerify,
defaultChunkSize,
}
return New(parameters)
}
driverConstructor := func() (storagedriver.StorageDriver, error) {
return swiftDriverConstructor(prefix)
}
testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip)
}
func TestEmptyRootList(t *testing.T) {
validRoot, err := ioutil.TempDir("", "driver-")
if err != nil {
t.Fatalf("unexpected error creating temporary directory: %v", err)
}
defer os.Remove(validRoot)
rootedDriver, err := swiftDriverConstructor(validRoot)
if err != nil {
t.Fatalf("unexpected error creating rooted driver: %v", err)
}
emptyRootDriver, err := swiftDriverConstructor("")
if err != nil {
t.Fatalf("unexpected error creating empty root driver: %v", err)
}
slashRootDriver, err := swiftDriverConstructor("/")
if err != nil {
t.Fatalf("unexpected error creating slash root driver: %v", err)
}
filename := "/test"
contents := []byte("contents")
ctx := context.Background()
err = rootedDriver.PutContent(ctx, filename, contents)
if err != nil {
t.Fatalf("unexpected error creating content: %v", err)
}
defer rootedDriver.Delete(ctx, filename)
keys, err := emptyRootDriver.List(ctx, "/")
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
keys, err = slashRootDriver.List(ctx, "/")
for _, path := range keys {
if !storagedriver.PathRegexp.MatchString(path) {
t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
}
}
}
|
[
"\"SWIFT_USERNAME\"",
"\"SWIFT_PASSWORD\"",
"\"SWIFT_AUTH_URL\"",
"\"SWIFT_TENANT_NAME\"",
"\"SWIFT_TENANT_ID\"",
"\"SWIFT_DOMAIN_NAME\"",
"\"SWIFT_DOMAIN_ID\"",
"\"SWIFT_CONTAINER_NAME\"",
"\"SWIFT_REGION_NAME\"",
"\"SWIFT_INSECURESKIPVERIFY\""
] |
[] |
[
"SWIFT_AUTH_URL",
"SWIFT_DOMAIN_ID",
"SWIFT_DOMAIN_NAME",
"SWIFT_TENANT_NAME",
"SWIFT_REGION_NAME",
"SWIFT_INSECURESKIPVERIFY",
"SWIFT_PASSWORD",
"SWIFT_CONTAINER_NAME",
"SWIFT_USERNAME",
"SWIFT_TENANT_ID"
] |
[]
|
["SWIFT_AUTH_URL", "SWIFT_DOMAIN_ID", "SWIFT_DOMAIN_NAME", "SWIFT_TENANT_NAME", "SWIFT_REGION_NAME", "SWIFT_INSECURESKIPVERIFY", "SWIFT_PASSWORD", "SWIFT_CONTAINER_NAME", "SWIFT_USERNAME", "SWIFT_TENANT_ID"]
|
go
| 10 | 0 | |
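The Swift storage-driver test above reads ten `SWIFT_*` variables and silently falls back to an in-process `swifttest` server when the core credentials are missing, so the suite runs without real object storage. The Python fragment below sketches that decision; the helper name and the local URL are invented for illustration only.

import os

def swift_settings() -> dict:
    # Prefer real credentials from the environment, otherwise point at a stub server.
    settings = {
        "username": os.getenv("SWIFT_USERNAME", ""),
        "password": os.getenv("SWIFT_PASSWORD", ""),
        "auth_url": os.getenv("SWIFT_AUTH_URL", ""),
        "container": os.getenv("SWIFT_CONTAINER_NAME", ""),
    }
    if not all(settings.values()):
        # Mirror the Go test: synthesise throwaway credentials for a local test server.
        settings.update(username="swifttest", password="swifttest",
                        auth_url="http://localhost:4443/v1.0", container="test")
    return settings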
pkg/commands/command_install_test.go
|
package commands
import (
"fmt"
"github.com/akamai/cli/pkg/config"
"github.com/akamai/cli/pkg/git"
"github.com/akamai/cli/pkg/packages"
"github.com/akamai/cli/pkg/terminal"
"github.com/fatih/color"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
)
func TestCmdInstall(t *testing.T) {
tests := map[string]struct {
args []string
init func(*testing.T, *mocked)
teardown func(*testing.T)
binaryResponseStatus int
withError string
}{
"install from official akamai repository, build from source": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
})
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(nil).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Writeln", mock.Anything).Return(0, nil)
m.term.On("Printf", mock.Anything, []interface{}(nil)).Return().Times(10)
m.term.On("Printf", mock.Anything, []interface{}{"aliases"}).Return().Twice()
m.term.On("Printf", mock.Anything, []interface{}{"alias"}).Return().Once()
m.term.On("Printf", mock.Anything, []interface{}{"commands.test help [command]"}).Return().Once()
m.term.On("Printf", mock.Anything).Return().Twice()
},
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
},
"install from official akamai repository, download binary": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
input, err := ioutil.ReadFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json")
require.NoError(t, err)
output := strings.ReplaceAll(string(input), "${REPOSITORY_URL}", os.Getenv("REPOSITORY_URL"))
err = ioutil.WriteFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json", []byte(output), 0755)
require.NoError(t, err)
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(fmt.Errorf("oops")).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusWarn).Return().Once()
m.term.On("Writeln", []interface{}{color.CyanString("oops")}).Return(0, nil).Once()
m.term.On("IsTTY").Return(true).Once()
m.term.On("Confirm", "Binary command(s) found, would you like to download and install it?", true).Return(true, nil).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Downloading binary...", []interface{}(nil)).Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusOK).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
},
binaryResponseStatus: http.StatusOK,
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
},
"package already exists": {
args: []string{"installed"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-installed.git"}).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
},
withError: color.RedString("Package directory already exists ("),
},
"no args passed": {
args: []string{},
init: func(t *testing.T, m *mocked) {},
withError: "You must specify a repository URL",
},
"git clone error": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(fmt.Errorf("oops")).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
})
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
},
withError: "Unable to clone repository: oops",
},
"error reading downloaded package, invalid cli.json": {
args: []string{"test-invalid-json"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-invalid-json.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-invalid-json",
"https://github.com/akamai/cli-test-invalid-json.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo_invalid_json/cli.json", "./testdata/.akamai-cli/src/cli-test-invalid-json")
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-invalid-json"))
},
withError: "Unable to install selected package",
},
"install from official akamai repository, unknown lang": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(packages.ErrUnknownLang).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("WarnOK").Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
},
"install from official akamai repository, user does not install binary": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
input, err := ioutil.ReadFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json")
require.NoError(t, err)
output := strings.ReplaceAll(string(input), "${REPOSITORY_URL}", os.Getenv("REPOSITORY_URL"))
err = ioutil.WriteFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json", []byte(output), 0755)
require.NoError(t, err)
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(fmt.Errorf("oops")).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.term.On("Stop", terminal.SpinnerStatusWarn).Return().Once()
m.term.On("Writeln", []interface{}{color.CyanString("oops")}).Return(0, nil).Once()
m.term.On("IsTTY").Return(true).Once()
m.term.On("Confirm", "Binary command(s) found, would you like to download and install it?", true).Return(false, nil).Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
withError: "Unable to install selected package",
},
"install from official akamai repository, error downloading binary, invalid URL": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(fmt.Errorf("oops")).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusWarn).Return().Once()
m.term.On("Writeln", []interface{}{color.CyanString("oops")}).Return(0, nil).Once()
m.term.On("IsTTY").Return(true).Once()
m.term.On("Confirm", "Binary command(s) found, would you like to download and install it?", true).Return(true, nil).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Downloading binary...", []interface{}(nil)).Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
binaryResponseStatus: http.StatusOK,
withError: "Unable to install selected package",
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
},
"install from official akamai repository, error downloading binary, invalid response status": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
input, err := ioutil.ReadFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json")
require.NoError(t, err)
output := strings.ReplaceAll(string(input), "${REPOSITORY_URL}", os.Getenv("REPOSITORY_URL"))
err = ioutil.WriteFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json", []byte(output), 0755)
require.NoError(t, err)
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(fmt.Errorf("oops")).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusWarn).Return().Once()
m.term.On("Writeln", []interface{}{color.CyanString("oops")}).Return(0, nil).Once()
m.term.On("IsTTY").Return(true).Once()
m.term.On("Confirm", "Binary command(s) found, would you like to download and install it?", true).Return(true, nil).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Downloading binary...", []interface{}(nil)).Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
binaryResponseStatus: http.StatusNotFound,
withError: "Unable to install selected package",
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
},
"error on install from source, binary does not exist": {
args: []string{"test-cmd"},
init: func(t *testing.T, m *mocked) {
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Attempting to fetch command from %s...", []interface{}{"https://github.com/akamai/cli-test-cmd.git"}).Return().Once()
m.gitRepo.On("Clone", "testdata/.akamai-cli/src/cli-test-cmd",
"https://github.com/akamai/cli-test-cmd.git", false, m.term).Return(nil).Once().
Run(func(args mock.Arguments) {
copyFile(t, "./testdata/repo_no_binary/cli.json", "./testdata/.akamai-cli/src/cli-test-cmd")
input, err := ioutil.ReadFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json")
require.NoError(t, err)
output := strings.ReplaceAll(string(input), "${REPOSITORY_URL}", os.Getenv("REPOSITORY_URL"))
err = ioutil.WriteFile("./testdata/.akamai-cli/src/cli-test-cmd/cli.json", []byte(output), 0755)
require.NoError(t, err)
})
m.term.On("Spinner").Return(m.term).Once()
m.term.On("OK").Return().Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Start", "Installing...", []interface{}(nil)).Return().Once()
m.langManager.On("Install", "testdata/.akamai-cli/src/cli-test-cmd",
packages.LanguageRequirements{Go: "1.14.0"}, []string{"app-1-cmd-1"}).Return(fmt.Errorf("oops")).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusWarn).Return().Once()
m.term.On("Writeln", []interface{}{color.CyanString("oops")}).Return(0, nil).Once()
m.term.On("Spinner").Return(m.term).Once()
m.term.On("Stop", terminal.SpinnerStatusFail).Return().Once()
m.cfg.On("GetValue", "cli", "enable-cli-statistics").Return("false", true)
// list all packages
m.term.On("Printf", mock.AnythingOfType("string"), mock.Anything).Return()
m.term.On("Writeln", mock.Anything).Return(0, nil)
},
teardown: func(t *testing.T) {
require.NoError(t, os.RemoveAll("./testdata/.akamai-cli/src/cli-test-cmd"))
},
withError: "Unable to install selected package",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/akamai/cli-test-command/releases/download/1.0.0/akamai-app-1-cmd-1", r.URL.String())
assert.Equal(t, http.MethodGet, r.Method)
w.WriteHeader(test.binaryResponseStatus)
_, err := w.Write([]byte(`binary content`))
assert.NoError(t, err)
}))
defer srv.Close()
require.NoError(t, os.Setenv("REPOSITORY_URL", srv.URL))
require.NoError(t, os.Setenv("AKAMAI_CLI_HOME", "./testdata"))
m := &mocked{&terminal.Mock{}, &config.Mock{}, &git.Mock{}, &packages.Mock{}}
command := &cli.Command{
Name: "install",
Action: cmdInstall(m.gitRepo, m.langManager),
}
app, ctx := setupTestApp(command, m)
args := os.Args[0:1]
args = append(args, "install")
args = append(args, test.args...)
test.init(t, m)
err := app.RunContext(ctx, args)
if test.teardown != nil {
test.teardown(t)
}
m.cfg.AssertExpectations(t)
if test.withError != "" {
assert.Error(t, err)
assert.Contains(t, err.Error(), test.withError)
return
}
require.NoError(t, err)
})
}
}
|
[
"\"REPOSITORY_URL\"",
"\"REPOSITORY_URL\"",
"\"REPOSITORY_URL\"",
"\"REPOSITORY_URL\""
] |
[] |
[
"REPOSITORY_URL"
] |
[]
|
["REPOSITORY_URL"]
|
go
| 1 | 0 | |
code_examples/Python/sync_HPC_preprocessor_with_file/app/PrepWithFile.py
|
"""HPC preprocessor for an HPC demo job
Example of an HPC preprocessor which takes a file and a text input and creates
the necessary parameters for the HPC service.
"""
import os
import logging
from spyne import Application, srpc, ServiceBase, Unicode
from spyne.protocol.soap import Soap11
from spyne.model.fault import Fault
from clfpy import AuthClient, ExtraParameters
# Define the target namespace
TNS = "prepwithfile.sintef.no"
# Define the name under which the service will be deployed
SERVICENAME = "PrepWithFile"
class TokenValidationFailedFault(Fault):
"""Raised when validation of the session token fails"""
pass
class PrepWithFile(ServiceBase):
"""The preprocessor service
Implements a single method which will act as the HPC preprocessor.
Note that the class name is _not_ important for the endpoint URL of the
service (that's defined by __service_url_path__), but it will show up in
the service WSDL as the service name.
"""
@srpc(Unicode, Unicode, Unicode, Unicode,
_returns=(Unicode, Unicode, Unicode, Unicode, Unicode),
_out_variable_names=(
"commandline", "parameters", "queue", "numNodes",
"numCores"
))
def hpcPrepWithFile(sessionToken, extraParameters, filepath, textinput):
"""Creates parameters required as input by the HPC launcher.
"""
# Validate session token
ep = ExtraParameters(extraParameters)
auth = AuthClient(ep.get_auth_WSDL_URL())
if not auth.validate_session_token(sessionToken):
logging.error("Token validation failed")
error_msg = "Session-token validation failed"
raise TokenValidationFailedFault(faultstring=error_msg)
# Prepare parameters the HPC launcher needs
commandline = "python"
parameters = "/app/startup.py {} {}".format(filepath, textinput)
queue = "qexp"
numNodes = os.environ["N_NODES"]
numCores = os.environ["N_CORES"]
return (commandline, parameters, queue, numNodes, numCores)
def create_app():
"""Creates an Application object containing the waiter service."""
app = Application([PrepWithFile], TNS,
in_protocol=Soap11(validator='soft'), out_protocol=Soap11())
return app
|
[] |
[] |
[
"N_CORES",
"N_NODES"
] |
[]
|
["N_CORES", "N_NODES"]
|
python
| 2 | 0 | |
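`hpcPrepWithFile` above indexes `os.environ` directly, so a missing `N_NODES` or `N_CORES` surfaces as a bare `KeyError` inside the SOAP call. The sketch below shows one defensive variant of that lookup; it is illustrative only and is not part of the service.

import os

def read_hpc_sizing() -> tuple:
    # Fail with an explicit message instead of a bare KeyError when the
    # container is started without the expected variables.
    try:
        return os.environ["N_NODES"], os.environ["N_CORES"]
    except KeyError as missing:
        raise RuntimeError(f"required environment variable {missing} is not set") from None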
test/test_utils.py
|
"""Tests for ConnectedDriveAccount."""
import datetime
import logging
import os
import time
import unittest
from unittest.mock import Mock
import time_machine
from bimmer_connected.country_selector import get_region_from_name, valid_regions
from bimmer_connected.utils import get_class_property_names, parse_datetime, to_json
from . import RESPONSE_DIR, VIN_G21
from .test_account import get_mocked_account
class TestUtils(unittest.TestCase):
"""Tests for utils."""
def test_drive_train(self):
"""Tests available attribute."""
vehicle = get_mocked_account().get_vehicle(VIN_G21)
self.assertEqual(
[
"available_attributes",
"available_state_services",
"brand",
"charging_profile",
"drive_train",
"drive_train_attributes",
"has_hv_battery",
"has_internal_combustion_engine",
"has_range_extender",
"has_weekly_planner_service",
"is_vehicle_tracking_enabled",
"lsc_type",
"name",
"to_json",
],
get_class_property_names(vehicle),
)
@time_machine.travel("2011-11-28 21:28:59 +0000", tick=False)
def test_to_json(self):
"""Test serialization to JSON."""
# Force UTC
os.environ["TZ"] = "UTC"
time.tzset()
account = get_mocked_account()
account.timezone = Mock(return_value=datetime.timezone.utc)
vehicle = account.get_vehicle(VIN_G21)
# Unset UTC after vehicle has been loaded
del os.environ["TZ"]
time.tzset()
with open(RESPONSE_DIR / "G21" / "json_export.json", "rb") as file:
expected = file.read().decode("UTF-8")
self.assertEqual(expected, to_json(vehicle, indent=4))
def test_parse_datetime(self):
"""Test datetime parser."""
dt_with_milliseconds = datetime.datetime(2021, 11, 12, 13, 14, 15, 567000, tzinfo=datetime.timezone.utc)
dt_without_milliseconds = datetime.datetime(2021, 11, 12, 13, 14, 15, tzinfo=datetime.timezone.utc)
self.assertEqual(dt_with_milliseconds, parse_datetime("2021-11-12T13:14:15.567Z"))
self.assertEqual(dt_without_milliseconds, parse_datetime("2021-11-12T13:14:15Z"))
with self.assertLogs(level=logging.ERROR):
self.assertIsNone(parse_datetime("2021-14-12T13:14:15Z"))
class TestCountrySelector(unittest.TestCase):
"""Tests for Country Selector."""
def test_valid_regions(self):
"""Test valid regions."""
self.assertListEqual(["north_america", "china", "rest_of_world"], valid_regions())
def test_unknown_region(self):
"""Test unkown region."""
with self.assertRaises(ValueError):
get_region_from_name("unkown")
|
[] |
[] |
[
"TZ"
] |
[]
|
["TZ"]
|
python
| 1 | 0 | |
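`test_to_json` in the row above forces UTC by writing `TZ` and calling `time.tzset()`, then deletes the variable without restoring any previous value. A context-manager sketch of the same idea is shown below (`time.tzset` is Unix-only); it is an illustration rather than a drop-in replacement for the test.

import contextlib
import os
import time

@contextlib.contextmanager
def forced_timezone(tz: str = "UTC"):
    # Remember whatever TZ was set before so it can be restored afterwards.
    previous = os.environ.get("TZ")
    os.environ["TZ"] = tz
    time.tzset()
    try:
        yield
    finally:
        if previous is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = previous
        time.tzset()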
cmd_edit.go
|
package main
import (
"os"
"os/exec"
)
func (a *App) CmdEdit(p *CommandLineEditParam) error {
f := a.ConfigFile()
editor := os.Getenv("EDITOR")
if p.Editor != "" {
editor = p.Editor
}
cmd := exec.Command(editor, f)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
src/app.py
|
import os
from flask import Flask, escape, request, jsonify
from marshmallow import ValidationError
from flask_pymongo import PyMongo
from src.auth.auth_exception import UserExistsException, UserNotFoundException, AccessDeniedException
from src.auth.controllers.auth import auth_blueprint
import src.settings
from src.secret.controllers.secret import secret_blueprint
app = Flask(__name__)
app.config["MONGO_URI"] = os.environ.get('MONGO_URL', 'mongodb://localhost:27017/db')
print(os.environ.get('MONGO_URL'))
mongo = PyMongo(app)
# set default version to v1
version = os.environ.get('API_VERSION', 'v1')
prefix = f"/api/{version}"
@app.errorhandler(ValidationError)
def validation_error_handler(err):
errors = err.messages
return jsonify(errors), 400
@app.errorhandler(UserExistsException)
def user_error_handler(e):
return jsonify({"error": e.msg}), 400
@app.errorhandler(AccessDeniedException)
def user_error_handler(e):
return jsonify({"error": e.msg}), 401
@app.errorhandler(UserNotFoundException)
def user_error_handler(e):
return jsonify({"error": e.msg}), 404
app.register_blueprint(auth_blueprint, url_prefix=f'{prefix}/auth')
app.register_blueprint(secret_blueprint, url_prefix=f'{prefix}/secret')
@app.route(f'{prefix}/ping', methods=['GET'])
def ping():
"""
Check if server is alive
:return: "pong"
"""
return "pong"
|
[] |
[] |
[
"API_VERSION",
"MONGO_URL"
] |
[]
|
["API_VERSION", "MONGO_URL"]
|
python
| 2 | 0 | |
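Because `version` defaults to `v1`, the health-check route registered above resolves to `/api/v1/ping` unless `API_VERSION` overrides it. The small sketch below exercises that route with Flask's test client; it assumes the module is importable as `src.app`, and the ping handler itself never touches the Mongo client.

import os

os.environ.setdefault("API_VERSION", "v1")  # make the assumed prefix explicit
from src.app import app                     # assumed import path

def test_ping():
    client = app.test_client()
    response = client.get("/api/v1/ping")
    assert response.status_code == 200
    assert response.get_data(as_text=True) == "pong"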
pipeline/trusted-resources/cmd/webhook_trusted_resource/main.go
|
/*
Copyright 2022 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"log"
"net/http"
"os"
"github.com/tektoncd/experimental/pipelines/trusted-resources/pkg/config"
taskvalidation "github.com/tektoncd/experimental/pipelines/trusted-resources/pkg/trustedtask"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
"knative.dev/pkg/system"
"knative.dev/pkg/webhook"
"knative.dev/pkg/webhook/certificates"
"knative.dev/pkg/webhook/resourcesemantics"
"knative.dev/pkg/webhook/resourcesemantics/validation"
)
var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{
// TODO: Add other types
// v1beta1
v1beta1.SchemeGroupVersion.WithKind("TaskRun"): &taskvalidation.TrustedTaskRun{},
}
func newValidationAdmissionController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
store := config.NewConfigStore(logging.FromContext(ctx).Named("config-store"))
store.WatchConfigs(cmw)
return validation.NewAdmissionController(ctx,
// Name of the resource webhook.
"validation.trustedresources.webhook.pipeline.tekton.dev",
// The path on which to serve the webhook.
"/resource-validation",
// The resources to validate and default.
types,
// A function that infuses the context passed to Validate/SetDefaults with custom metadata.
func(ctx context.Context) context.Context {
return store.ToContext(ctx)
},
// Whether to disallow unknown fields.
true,
)
}
func main() {
serviceName := os.Getenv("WEBHOOK_SERVICE_NAME")
if serviceName == "" {
serviceName = "tekton-trusted-resources-webhook"
}
secretName := os.Getenv("WEBHOOK_SECRET_NAME")
if secretName == "" {
secretName = "trusted-resources-webhook-certs" // #nosec
}
// Scope informers to the webhook's namespace instead of cluster-wide
ctx := injection.WithNamespaceScope(signals.NewContext(), system.Namespace())
// Set up a signal context with our webhook options
ctx = webhook.WithOptions(ctx, webhook.Options{
ServiceName: serviceName,
Port: 8443,
SecretName: secretName,
})
mux := http.NewServeMux()
mux.HandleFunc("/", handler)
mux.HandleFunc("/health", handler)
mux.HandleFunc("/readiness", handler)
port := os.Getenv("PROBES_PORT")
if port == "" {
port = "8080"
}
go func() {
// start the web server on port and accept requests
log.Printf("Readiness and health check server listening on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, mux))
}()
sharedmain.MainWithContext(ctx, serviceName,
certificates.NewController,
newValidationAdmissionController,
)
}
func handler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
|
[
"\"WEBHOOK_SERVICE_NAME\"",
"\"WEBHOOK_SECRET_NAME\"",
"\"PROBES_PORT\""
] |
[] |
[
"WEBHOOK_SECRET_NAME",
"PROBES_PORT",
"WEBHOOK_SERVICE_NAME"
] |
[]
|
["WEBHOOK_SECRET_NAME", "PROBES_PORT", "WEBHOOK_SERVICE_NAME"]
|
go
| 3 | 0 | |
backend/server/server/wsgi.py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
application = get_wsgi_application()
# ML registry
import inspect
from apps.ml.registry import MLRegistry
from apps.ml.income_classifier.random_forest import RandomForestClassifier
from apps.ml.income_classifier.extra_trees import ExtraTreesClassifier
try:
registry = MLRegistry() # create ML registry
# Random Forest classifier
rf = RandomForestClassifier()
# add to ML registry
registry.add_algorithm(endpoint_name="income_classifier",
algorithm_object=rf,
algorithm_name="random forest",
algorithm_status="production",
algorithm_version="0.0.1",
owner="Troye",
algorithm_description="Random Forest with simple pre- and post-processing",
algorithm_code=inspect.getsource(RandomForestClassifier))
#Extra Trees Classifier
et = ExtraTreesClassifier()
# add to ML registry
registry.add_algorithm(endpoint_name="income_classifier",
algorithm_object=et,
algorithm_name = "extra trees",
algorithm_status="testing",
algorithm_version="0.0.1",
owner="Gilbert",
algorithm_description="Extra Trees with simple pre- and post-processing",
algorithm_code=inspect.getsource(RandomForestClassifier))
except Exception as e:
print("Exception while loading the algorithms to the registry,", str(e))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/containerd/containerd/archive/tar.go
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd/log"
"github.com/containerd/continuity/fs"
"github.com/pkg/errors"
)
var bufPool = &sync.Pool{
New: func() interface{} {
buffer := make([]byte, 32*1024)
return &buffer
},
}
var errInvalidArchive = errors.New("invalid archive")
// Diff returns a tar stream of the computed filesystem
// difference between the provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func Diff(ctx context.Context, a, b string) io.ReadCloser {
r, w := io.Pipe()
go func() {
err := WriteDiff(ctx, w, a, b)
if err = w.CloseWithError(err); err != nil {
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
}
}()
return r
}
// WriteDiff writes a tar stream of the computed difference between the
// provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
cw := newChangeWriter(w, b)
err := fs.Changes(ctx, a, b, cw.HandleChange)
if err != nil {
return errors.Wrap(err, "failed to create diff tar stream")
}
return cw.Close()
}
const (
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
whiteoutPrefix = ".wh."
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
paxSchilyXattr = "SCHILY.xattrs."
)
// Apply applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
root = filepath.Clean(root)
var options ApplyOptions
for _, opt := range opts {
if err := opt(&options); err != nil {
return 0, errors.Wrap(err, "failed to apply option")
}
}
if options.Filter == nil {
options.Filter = all
}
return apply(ctx, root, tar.NewReader(r), options)
}
// applyNaive applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
var (
dirs []*tar.Header
// Used for handling opaque directory markers which
// may occur out of order
unpackedPaths = make(map[string]struct{})
// Used for aufs plink directory
aufsTempdir = ""
aufsHardlinks = make(map[string]*tar.Header)
)
// Iterate through the files in the archive.
for {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
accept, err := options.Filter(hdr)
if err != nil {
return 0, err
}
if !accept {
continue
}
if skipFile(hdr) {
log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
continue
}
// Split name and resolve symlinks for root directory.
ppath, base := filepath.Split(hdr.Name)
ppath, err = fs.RootPath(root, ppath)
if err != nil {
return 0, errors.Wrap(err, "failed to get root path")
}
// Join to root before joining to parent path to ensure relative links are
// already resolved based on the root before adding to parent.
path := filepath.Join(ppath, filepath.Join("/", base))
if path == root {
log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
continue
}
// If file is not directly under root, ensure parent directory
// exists or is created.
if ppath != root {
parentPath := ppath
if base == "" {
parentPath = filepath.Dir(path)
}
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = mkdirAll(parentPath, 0700)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
p, err := fs.RootPath(aufsTempdir, basename)
if err != nil {
return 0, err
}
if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
return 0, err
}
}
if hdr.Name != whiteoutOpaqueDir {
continue
}
}
if strings.HasPrefix(base, whiteoutPrefix) {
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
continue
}
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
// Ensure originalPath is under dir
if dir[len(dir)-1] != filepath.Separator {
dir += string(filepath.Separator)
}
if !strings.HasPrefix(originalPath, dir) {
return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
}
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
continue
}
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
srcData := io.Reader(tr)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
p, err := fs.RootPath(aufsTempdir, linkBasename)
if err != nil {
return 0, err
}
tmpFile, err := os.Open(p)
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
for _, hdr := range dirs {
path, err := fs.RootPath(root, hdr.Name)
if err != nil {
return 0, err
}
if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
return 0, err
}
}
return size, nil
}
func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if err := mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
}
case tar.TypeReg, tar.TypeRegA:
file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
if err != nil {
return err
}
_, err = copyBuffered(ctx, file, reader)
if err1 := file.Close(); err == nil {
err = err1
}
if err != nil {
return err
}
case tar.TypeBlock, tar.TypeChar:
// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeFifo:
// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeLink:
targetPath, err := hardlinkRootPath(extractDir, hdr.Linkname)
if err != nil {
return err
}
if err := os.Link(targetPath, path); err != nil {
return err
}
case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}
case tar.TypeXGlobalHeader:
log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
return nil
default:
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
}
// Lchown is not supported on Windows.
if runtime.GOOS != "windows" {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
return err
}
}
for key, value := range hdr.PAXRecords {
if strings.HasPrefix(key, paxSchilyXattr) {
key = key[len(paxSchilyXattr):]
if err := setxattr(path, key, value); err != nil {
if errors.Cause(err) == syscall.ENOTSUP {
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
continue
}
return err
}
}
}
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if err := handleLChmod(hdr, path, hdrInfo); err != nil {
return err
}
return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
}
type changeWriter struct {
tw *tar.Writer
source string
whiteoutT time.Time
inodeSrc map[uint64]string
inodeRefs map[uint64][]string
addedDirs map[string]struct{}
}
func newChangeWriter(w io.Writer, source string) *changeWriter {
return &changeWriter{
tw: tar.NewWriter(w),
source: source,
whiteoutT: time.Now(),
inodeSrc: map[uint64]string{},
inodeRefs: map[uint64][]string{},
addedDirs: map[string]struct{}{},
}
}
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if k == fs.ChangeKindDelete {
whiteOutDir := filepath.Dir(p)
whiteOutBase := filepath.Base(p)
whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
hdr := &tar.Header{
Typeflag: tar.TypeReg,
Name: whiteOut[1:],
Size: 0,
ModTime: cw.whiteoutT,
AccessTime: cw.whiteoutT,
ChangeTime: cw.whiteoutT,
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write whiteout header")
}
} else {
var (
link string
err error
source = filepath.Join(cw.source, p)
)
switch {
case f.Mode()&os.ModeSocket != 0:
return nil // ignore sockets
case f.Mode()&os.ModeSymlink != 0:
if link, err = os.Readlink(source); err != nil {
return err
}
}
hdr, err := tar.FileInfoHeader(f, link)
if err != nil {
return err
}
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
name := p
if strings.HasPrefix(name, string(filepath.Separator)) {
name, err = filepath.Rel(string(filepath.Separator), name)
if err != nil {
return errors.Wrap(err, "failed to make path relative")
}
}
name, err = tarName(name)
if err != nil {
return errors.Wrap(err, "cannot canonicalize path")
}
// suffix with '/' for directories
if f.IsDir() && !strings.HasSuffix(name, "/") {
name += "/"
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
return errors.Wrap(err, "failed to set device headers")
}
// additionalLinks stores file names which must be linked to
// this file when this file is added
var additionalLinks []string
inode, isHardlink := fs.GetLinkInfo(f)
if isHardlink {
// If the inode has a source, always link to it
if source, ok := cw.inodeSrc[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
} else {
if k == fs.ChangeKindUnmodified {
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
return nil
}
cw.inodeSrc[inode] = name
additionalLinks = cw.inodeRefs[inode]
delete(cw.inodeRefs, inode)
}
} else if k == fs.ChangeKindUnmodified {
// Nothing to write to diff
return nil
}
if capability, err := getxattr(source, "security.capability"); err != nil {
return errors.Wrap(err, "failed to get capabilities xattr")
} else if capability != nil {
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := open(source)
if err != nil {
return errors.Wrapf(err, "failed to open path: %v", source)
}
defer file.Close()
n, err := copyBuffered(context.TODO(), cw.tw, file)
if err != nil {
return errors.Wrap(err, "failed to copy")
}
if n != hdr.Size {
return errors.New("short write copying file")
}
}
if additionalLinks != nil {
source = hdr.Name
for _, extra := range additionalLinks {
hdr.Name = extra
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
}
}
}
return nil
}
func (cw *changeWriter) Close() error {
if err := cw.tw.Close(); err != nil {
return errors.Wrap(err, "failed to close tar writer")
}
return nil
}
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
name := strings.TrimRight(hdr.Name, "/")
fname := filepath.Join(cw.source, name)
parent := filepath.Dir(name)
pname := filepath.Join(cw.source, parent)
// Do not include root directory as parent
if fname != cw.source && pname != cw.source {
_, ok := cw.addedDirs[parent]
if !ok {
cw.addedDirs[parent] = struct{}{}
fi, err := os.Stat(pname)
if err != nil {
return err
}
if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil {
return err
}
}
}
if hdr.Typeflag == tar.TypeDir {
cw.addedDirs[name] = struct{}{}
}
return nil
}
func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
}
nr, er := src.Read(*buf)
if nr > 0 {
nw, ew := dst.Write((*buf)[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
}
break
}
}
return written, err
}
// hardlinkRootPath returns target linkname, evaluating and bounding any
// symlink to the parent directory.
//
// NOTE: Allow hardlink to the softlink, not the real one. For example,
//
// touch /tmp/zzz
// ln -s /tmp/zzz /tmp/xxx
// ln /tmp/xxx /tmp/yyy
//
// /tmp/yyy should end up as a link to the symlink /tmp/xxx itself, not to /tmp/zzz.
func hardlinkRootPath(root, linkname string) (string, error) {
ppath, base := filepath.Split(linkname)
ppath, err := fs.RootPath(root, ppath)
if err != nil {
return "", err
}
targetPath := filepath.Join(ppath, base)
if !strings.HasPrefix(targetPath, root) {
targetPath = root
}
return targetPath, nil
}
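// Illustrative note (not part of the original file): with root "/var/lib/layer",
// a linkname whose parent resolves outside the root is clamped back to root by
// the HasPrefix check above, so a crafted hardlink cannot escape the layer root.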
|
[
"\"XDG_RUNTIME_DIR\""
] |
[] |
[
"XDG_RUNTIME_DIR"
] |
[]
|
["XDG_RUNTIME_DIR"]
|
go
| 1 | 0 | |
tests/runtests.py
|
#!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import get_runner
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.log import DEFAULT_LOGGING
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango30Warning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'flatpages_tests': 'django.contrib.flatpages',
'redirects_tests': 'django.contrib.redirects',
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' not in f and
os.path.basename(f) not in SUBDIRS_TO_SKIP and
not os.path.isfile(f) and
os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
modules.append((modpath, f))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel):
# Reduce the given test labels to just the app module path.
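# e.g. 'admin_views.tests.AdminViewBasicTest' is reduced to 'admin_views'.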
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
# Load all the test model apps.
test_modules = get_test_modules()
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
settings.INSTALLED_APPS.append(CONTRIB_TESTS_TO_APPS[module_name])
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases).
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run on Python 3.6+ (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
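# Each parallel worker clones the test database, so fall back to a single
# process when any configured backend cannot clone databases.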
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags):
state = setup(verbosity, test_labels, parallel)
extra_tests = []
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
)
failures = test_runner.run_tests(
test_labels or get_installed(),
extra_tests=extra_tests,
)
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel):
state = setup(options.verbosity, test_labels, parallel)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true', dest='failfast',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'-k', '--keepdb', action='store_true', dest='keepdb',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', dest='selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--debug-sql', action='store_true', dest='debug_sql',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', dest='parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
options = parser.parse_args()
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(options.bisect, options, options.modules, options.parallel)
elif options.pair:
paired_tests(options.pair, options, options.modules, options.parallel)
else:
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
)
if failures:
sys.exit(1)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE",
"TMPDIR"
] |
[]
|
["DJANGO_SETTINGS_MODULE", "TMPDIR"]
|
python
| 2 | 0 | |
Sermons/src/main/java/org/christchurchmayfair/api/sermons/GithubLogin.java
|
package org.christchurchmayfair.api.sermons;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.HttpMethod;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyRequestEvent;
import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyResponseEvent;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.amazonaws.services.secretsmanager.model.DecryptionFailureException;
import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import com.amazonaws.services.secretsmanager.model.InternalServiceErrorException;
import com.amazonaws.services.secretsmanager.model.InvalidParameterException;
import com.amazonaws.services.secretsmanager.model.InvalidRequestException;
import com.amazonaws.services.secretsmanager.model.ResourceNotFoundException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import org.apache.http.entity.ContentType;
import org.christchurchmayfair.api.sermons.model.GithubAccessTokenResponse;
import org.christchurchmayfair.api.sermons.model.LoginRequest;
import org.christchurchmayfair.api.sermons.model.Sermon;
import org.christchurchmayfair.api.sermons.persistence.SermonDataStore;
import java.io.IOException;
import java.net.URL;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.client.WebTarget;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* Handler for requests to Lambda function.
*/
public class GithubLogin implements RequestHandler<APIGatewayProxyRequestEvent, APIGatewayProxyResponseEvent> {
protected ObjectMapper objectMapper;
protected Map<String, String> responseHeaders = new HashMap<>();
protected APIGatewayProxyResponseEvent response = new APIGatewayProxyResponseEvent();
private String githubOAuthClientSecret;
private String clientId = System.getenv("GITHUB_CLIENT_ID");
public GithubLogin() {
super();
String secretName = System.getenv("GITHUB_OAUTH_SECRET_NAME");
String region = System.getenv("GITHUB_OAUTH_SECRET_REGION");
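// The GitHub OAuth client secret lives in AWS Secrets Manager; the secret name
// and region are supplied through environment variables.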
AWSSecretsManager awsSecretsManager = AWSSecretsManagerClientBuilder.standard()
.withRegion(region)
.build();
String secretPayload = "";
String decodedBinarySecret = "";
GetSecretValueRequest getSecretValueRequest = new GetSecretValueRequest()
.withSecretId(secretName);
GetSecretValueResult getSecretValueResult = null;
try {
getSecretValueResult = awsSecretsManager.getSecretValue(getSecretValueRequest);
} catch (DecryptionFailureException e) {
// Secrets Manager can't decrypt the protected secret text using the provided KMS key.
// Deal with the exception here, and/or rethrow at your discretion.
throw e;
} catch (InternalServiceErrorException e) {
// An error occurred on the server side.
// Deal with the exception here, and/or rethrow at your discretion.
throw e;
} catch (InvalidParameterException e) {
// You provided an invalid value for a parameter.
// Deal with the exception here, and/or rethrow at your discretion.
throw e;
} catch (InvalidRequestException e) {
// You provided a parameter value that is not valid for the current state of the resource.
// Deal with the exception here, and/or rethrow at your discretion.
throw e;
} catch (ResourceNotFoundException e) {
// We can't find the resource that you asked for.
// Deal with the exception here, and/or rethrow at your discretion.
throw e;
}
// Decrypts secret using the associated KMS CMK.
// Depending on whether the secret is a string or binary, one of these fields will be populated.
if (getSecretValueResult.getSecretString() != null) {
secretPayload = getSecretValueResult.getSecretString();
}
else {
decodedBinarySecret = new String(Base64.getDecoder().decode(getSecretValueResult.getSecretBinary()).array());
}
objectMapper = new ObjectMapper();
objectMapper.registerModule(new Jdk8Module());
objectMapper.registerModule(new JavaTimeModule());
objectMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
String allowedOrigin = System.getenv("CORS_ALLOW_ORIGIN");
responseHeaders.put("Access-Control-Allow-Origin", allowedOrigin);
responseHeaders.put("Access-Control-Allow-Methods", "GET,PUT,POST");
responseHeaders.put("Access-Control-Allow-Headers", "Content-Type, Referer, User-Agent, Accept");
try {
githubOAuthClientSecret = objectMapper.readTree(secretPayload).get("GITHUB_OAUTH_CLIENT_SECRET").asText();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public APIGatewayProxyResponseEvent handleRequest(APIGatewayProxyRequestEvent input, Context context) {
LoginRequest loginRequest;
try {
if (input.getIsBase64Encoded()) {
final byte[] bytes = Base64.getDecoder().decode(input.getBody());
loginRequest = objectMapper.readValue(bytes, LoginRequest.class);
} else {
loginRequest = objectMapper.readValue(input.getBody(), LoginRequest.class);
}
} catch (Exception e) {
responseHeaders.put("Content-Type","text/plain");
response.setHeaders(responseHeaders);
response.setBody("Problems converting string to json for request" + e.getMessage());
response.setStatusCode(500);
return response;
}
System.out.println(loginRequest);
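// Exchange the temporary OAuth code (plus the anti-forgery state value) for an
// access token via GitHub's access_token endpoint.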
Client client = ClientBuilder.newClient();
final WebTarget target = client.target("https://github.com/login/oauth/access_token")
.queryParam("client_id", clientId)
.queryParam("client_secret", githubOAuthClientSecret)
.queryParam("code", loginRequest.getCode())
.queryParam("state", loginRequest.getState());
System.out.println(target);
final Response post = target.request(MediaType.APPLICATION_JSON_TYPE).post(null);
System.out.println(post.getStatus());
System.out.println(post.getHeaders());
final GithubAccessTokenResponse githubAccessTokenResponse = post.readEntity(GithubAccessTokenResponse.class);
System.out.println(githubAccessTokenResponse);
try {
response.setBody(objectMapper.writeValueAsString(githubAccessTokenResponse));
responseHeaders.put("Content-Type","application/json");
} catch (JsonProcessingException e) {
e.printStackTrace();
response.setBody("Problems converting json to string");
return response;
}
response.setHeaders(responseHeaders);
return response;
}
}
|
[
"\"GITHUB_CLIENT_ID\"",
"\"GITHUB_OAUTH_SECRET_NAME\"",
"\"GITHUB_OAUTH_SECRET_REGION\"",
"\"CORS_ALLOW_ORIGIN\""
] |
[] |
[
"GITHUB_OAUTH_SECRET_REGION",
"GITHUB_OAUTH_SECRET_NAME",
"GITHUB_CLIENT_ID",
"CORS_ALLOW_ORIGIN"
] |
[]
|
["GITHUB_OAUTH_SECRET_REGION", "GITHUB_OAUTH_SECRET_NAME", "GITHUB_CLIENT_ID", "CORS_ALLOW_ORIGIN"]
|
java
| 4 | 0 | |
police.go
|
package main
import (
"fmt"
"log"
"os"
"github.com/bwmarrin/discordgo"
)
const urlRegexString string = `(?:(?:https?|ftp):\/\/|\b(?:[a-z\d]+\.))(?:(?:[^\s()<>]+|\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))?\))+(?:\((?:[^\s()<>]+|(?:\([^\s()<>]+\)))?\)|[^\s!()\[\]{};:'".,<>?«»“”‘’]))?`
var (
policeChannel *discordgo.Channel
)
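// initPoliceChannel resolves the channel named by VPBOT_POLICE_CHANNEL; when
// the variable is unset, message policing stays disabled.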
func initPoliceChannel(s *discordgo.Session) {
channelId := os.Getenv("VPBOT_POLICE_CHANNEL")
if len(channelId) <= 0 {
return
}
var err error
policeChannel, err = s.Channel(channelId)
if err != nil {
log.Printf("Couldn't find the police with ID: %s", channelId)
return
}
}
func msgStreamPoliceHandler(session *discordgo.Session, msg *discordgo.MessageCreate) {
if policeChannel != nil && msg.ChannelID == policeChannel.ID {
urlInMessage := urlRegex.MatchString(msg.Content)
if len(msg.Attachments) <= 0 && len(msg.Embeds) <= 0 && urlInMessage == false {
guild, _ := session.State.Guild(msg.GuildID)
channel, _ := session.State.Channel(msg.ChannelID)
log.Printf("[%s|%s] Message did not furfill requirements! deleting message (%s) from %s#%s\n%s", guild.Name, channel.Name, msg.ID, msg.Author.Username, msg.Author.Discriminator, msg.Content)
session.ChannelMessageDelete(channel.ID, msg.ID)
sendPoliceDM(session, msg.Author, guild, channel, "Message was deleted", "Showcase messages require that either you include a link or a picture/file in your message, if you believe your message has been wrongfully deleted, please contact a mod.\n If you wish to chat about showcase, please look for a #showcase-banter channel")
}
}
}
func sendPoliceDM(s *discordgo.Session, user *discordgo.User, guild *discordgo.Guild, channel *discordgo.Channel, event string, reason string) {
dm, err := s.UserChannelCreate(user.ID)
if err == nil {
s.ChannelMessageSend(dm.ID, fmt.Sprintf("%s in '%s' channel '%s', reason:\n%s", event, guild.Name, channel.Name, reason))
}
}
|
[
"\"VPBOT_POLICE_CHANNEL\""
] |
[] |
[
"VPBOT_POLICE_CHANNEL"
] |
[]
|
["VPBOT_POLICE_CHANNEL"]
|
go
| 1 | 0 | |
src/cmd/go/internal/modload/init.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modload
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"go/build"
"internal/lazyregexp"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/lockedfile"
"cmd/go/internal/modconv"
"cmd/go/internal/modfetch"
"cmd/go/internal/mvs"
"cmd/go/internal/search"
"golang.org/x/mod/modfile"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
)
var (
initialized bool
modRoot string
Target module.Version
// targetPrefix is the path prefix for packages in Target, without a trailing
// slash. For most modules, targetPrefix is just Target.Path, but the
// standard-library module "std" has an empty prefix.
targetPrefix string
// targetInGorootSrc caches whether modRoot is within GOROOT/src.
// The "std" module is special within GOROOT/src, but not otherwise.
targetInGorootSrc bool
gopath string
CmdModInit bool // running 'go mod init'
CmdModModule string // module argument for 'go mod init'
// RootMode determines whether a module root is needed.
RootMode Root
// ForceUseModules may be set to force modules to be enabled when
// GO111MODULE=auto or to report an error when GO111MODULE=off.
ForceUseModules bool
allowMissingModuleImports bool
)
type Root int
const (
// AutoRoot is the default for most commands. modload.Init will look for
// a go.mod file in the current directory or any parent. If none is found,
// modules may be disabled (GO111MODULE=auto) or commands may run in a
// limited module mode.
AutoRoot Root = iota
// NoRoot is used for commands that run in module mode and ignore any go.mod
// file in the current directory or in parent directories.
NoRoot
// NeedRoot is used for commands that must run in module mode and don't
// make sense without a main module.
NeedRoot
)
// ModFile returns the parsed go.mod file.
//
// Note that after calling LoadPackages or LoadAllModules,
// the require statements in the modfile.File are no longer
// the source of truth and will be ignored: edits made directly
// will be lost at the next call to WriteGoMod.
// To make permanent changes to the require statements
// in go.mod, edit it before loading.
func ModFile() *modfile.File {
Init()
if modFile == nil {
die()
}
return modFile
}
func BinDir() string {
Init()
return filepath.Join(gopath, "bin")
}
// Init determines whether module mode is enabled, locates the root of the
// current module (if any), sets environment variables for Git subprocesses, and
// configures the cfg, codehost, load, modfetch, and search packages for use
// with modules.
func Init() {
if initialized {
return
}
initialized = true
// Keep in sync with WillBeEnabled. We perform extra validation here, and
// there are lots of diagnostics and side effects, so we can't use
// WillBeEnabled directly.
var mustUseModules bool
env := cfg.Getenv("GO111MODULE")
switch env {
default:
base.Fatalf("go: unknown environment setting GO111MODULE=%s", env)
case "auto":
mustUseModules = ForceUseModules
case "on", "":
mustUseModules = true
case "off":
if ForceUseModules {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
mustUseModules = false
return
}
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
if CmdModInit {
// Running 'go mod init': go.mod will be created in current directory.
modRoot = base.Cwd
} else if RootMode == NoRoot {
if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") {
base.Fatalf("go: -modfile cannot be used with commands that ignore the current module")
}
modRoot = ""
} else {
modRoot = findModuleRoot(base.Cwd)
if modRoot == "" {
if cfg.ModFile != "" {
base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.")
}
if RootMode == NeedRoot {
base.Fatalf("go: cannot find main module; see 'go help modules'")
}
if !mustUseModules {
// GO111MODULE is 'auto', and we can't find a module root.
// Stay in GOPATH mode.
return
}
} else if search.InDir(modRoot, os.TempDir()) == "." {
// If you create /tmp/go.mod for experimenting,
// then any tests that create work directories under /tmp
// will find it and get modules when they're not expecting them.
// It's a bit of a peculiar thing to disallow but quite mysterious
// when it happens. See golang.org/issue/26708.
modRoot = ""
fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir())
if !mustUseModules {
return
}
}
}
if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") {
base.Fatalf("go: -modfile=%s: file does not have .mod extension", cfg.ModFile)
}
// We're in module mode. Install the hooks to make it work.
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) == 0 || list[0] == "" {
base.Fatalf("missing $GOPATH")
}
gopath = list[0]
if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil {
base.Fatalf("$GOPATH/go.mod exists but should not")
}
cfg.ModulesEnabled = true
if modRoot == "" {
// We're in module mode, but not inside a module.
//
// Commands like 'go build', 'go run', 'go list' have no go.mod file to
// read or write. They would need to find and download the latest versions
// of a potentially large number of modules with no way to save version
// information. We can succeed slowly (but not reproducibly), but that's
// not usually a good experience.
//
// Instead, we forbid resolving import paths to modules other than std and
// cmd. Users may still build packages specified with .go files on the
// command line, but they'll see an error if those files import anything
// outside std.
//
// This can be overridden by calling AllowMissingModuleImports.
// For example, 'go get' does this, since it is expected to resolve paths.
//
// See golang.org/issue/32027.
} else {
modfetch.GoSumFile = strings.TrimSuffix(ModFilePath(), ".mod") + ".sum"
search.SetModRoot(modRoot)
}
}
// WillBeEnabled checks whether modules should be enabled but does not
// initialize modules by installing hooks. If Init has already been called,
// WillBeEnabled returns the same result as Enabled.
//
// This function is needed to break a cycle. The main package needs to know
// whether modules are enabled in order to install the module or GOPATH version
// of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't
// be called until the command is installed and flags are parsed. Instead of
// calling Init and Enabled, the main package can call this function.
func WillBeEnabled() bool {
if modRoot != "" || cfg.ModulesEnabled {
// Already enabled.
return true
}
if initialized {
// Initialized, not enabled.
return false
}
// Keep in sync with Init. Init does extra validation and prints warnings or
// exits, so it can't call this function directly.
env := cfg.Getenv("GO111MODULE")
switch env {
case "on", "":
return true
case "auto":
break
default:
return false
}
if CmdModInit {
// Running 'go mod init': go.mod will be created in current directory.
return true
}
if modRoot := findModuleRoot(base.Cwd); modRoot == "" {
// GO111MODULE is 'auto', and we can't find a module root.
// Stay in GOPATH mode.
return false
} else if search.InDir(modRoot, os.TempDir()) == "." {
// If you create /tmp/go.mod for experimenting,
// then any tests that create work directories under /tmp
// will find it and get modules when they're not expecting them.
// It's a bit of a peculiar thing to disallow but quite mysterious
// when it happens. See golang.org/issue/26708.
return false
}
return true
}
// Enabled reports whether modules are (or must be) enabled.
// If modules are enabled but there is no main module, Enabled returns true
// and then the first use of module information will call die
// (usually through MustModRoot).
func Enabled() bool {
Init()
return modRoot != "" || cfg.ModulesEnabled
}
// ModRoot returns the root of the main module.
// It calls base.Fatalf if there is no main module.
func ModRoot() string {
if !HasModRoot() {
die()
}
return modRoot
}
// HasModRoot reports whether a main module is present.
// HasModRoot may return false even if Enabled returns true: for example, 'get'
// does not require a main module.
func HasModRoot() bool {
Init()
return modRoot != ""
}
// ModFilePath returns the effective path of the go.mod file. Normally, this is
// "go.mod" in the directory returned by ModRoot, but the -modfile flag may
// change its location. ModFilePath calls base.Fatalf if there is no main
// module, even if -modfile is set.
func ModFilePath() string {
if !HasModRoot() {
die()
}
if cfg.ModFile != "" {
return cfg.ModFile
}
return filepath.Join(modRoot, "go.mod")
}
// printStackInDie causes die to print a stack trace.
//
// It is enabled by the testgo tag, and helps to diagnose paths that
// unexpectedly require a main module.
var printStackInDie = false
func die() {
if printStackInDie {
debug.PrintStack()
}
if cfg.Getenv("GO111MODULE") == "off" {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
if dir, name := findAltConfig(base.Cwd); dir != "" {
rel, err := filepath.Rel(base.Cwd, dir)
if err != nil {
rel = dir
}
cdCmd := ""
if rel != "." {
cdCmd = fmt.Sprintf("cd %s && ", rel)
}
base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd)
}
base.Fatalf("go: cannot find main module; see 'go help modules'")
}
// InitMod sets Target and, if there is a main module, parses the initial build
// list from its go.mod file. If InitMod is called by 'go mod init', InitMod
// will populate go.mod in memory, possibly importing dependencies from a
// legacy configuration file. For other commands, InitMod may make other
// adjustments in memory, like adding a go directive. WriteGoMod should be
// called later to write changes out to disk.
//
// As a side-effect, InitMod sets a default for cfg.BuildMod if it does not
// already have an explicit value.
func InitMod(ctx context.Context) {
if len(buildList) > 0 {
return
}
Init()
if modRoot == "" {
Target = module.Version{Path: "command-line-arguments"}
targetPrefix = "command-line-arguments"
buildList = []module.Version{Target}
return
}
if CmdModInit {
// Running go mod init: do legacy module conversion
legacyModInit()
modFileToBuildList()
return
}
gomod := ModFilePath()
data, err := lockedfile.Read(gomod)
if err != nil {
base.Fatalf("go: %v", err)
}
var fixed bool
f, err := modfile.Parse(gomod, data, fixVersion(ctx, &fixed))
if err != nil {
// Errors returned by modfile.Parse begin with file:line.
base.Fatalf("go: errors parsing go.mod:\n%s\n", err)
}
modFile = f
index = indexModFile(data, f, fixed)
if f.Module == nil {
// No module declaration. Must add module path.
base.Fatalf("go: no module declaration in go.mod.\n\tRun 'go mod edit -module=example.com/mod' to specify the module path.")
}
if len(f.Syntax.Stmt) == 1 && f.Module != nil {
// Entire file is just a module statement.
// Populate require if possible.
legacyModInit()
}
if err := checkModulePathLax(f.Module.Mod.Path); err != nil {
base.Fatalf("go: %v", err)
}
setDefaultBuildMod()
modFileToBuildList()
if cfg.BuildMod == "vendor" {
readVendorList()
checkVendorConsistency()
}
}
// checkModulePathLax checks that the path meets some minimum requirements
// to avoid confusing users or the module cache. The requirements are weaker
// than those of module.CheckPath to allow room for weakening module path
// requirements in the future, but strong enough to help users avoid significant
// problems.
func checkModulePathLax(p string) error {
// TODO(matloob): Replace calls of this function in this CL with calls
// to module.CheckImportPath once it's been laxened, if it becomes laxened.
// See golang.org/issue/29101 for a discussion about whether to make CheckImportPath
// more lax or more strict.
errorf := func(format string, args ...interface{}) error {
return fmt.Errorf("invalid module path %q: %s", p, fmt.Sprintf(format, args...))
}
// Disallow shell characters " ' * < > ? ` | to avoid triggering bugs
// with file systems and subcommands. Disallow file path separators : and \
// because path separators other than / will confuse the module cache.
// See fileNameOK in golang.org/x/mod/module/module.go.
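// For example, paths such as "example.com/m?x" or "C:\mods\m" are rejected
// by the checks below (illustrative).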
shellChars := "`" + `\"'*<>?|`
fsChars := `\:`
if i := strings.IndexAny(p, shellChars); i >= 0 {
return errorf("contains disallowed shell character %q", p[i])
}
if i := strings.IndexAny(p, fsChars); i >= 0 {
return errorf("contains disallowed path separator character %q", p[i])
}
// Ensure path.IsAbs and build.IsLocalImport are false, and that the path is
// invariant under path.Clean, also to avoid confusing the module cache.
if path.IsAbs(p) {
return errorf("is an absolute path")
}
if build.IsLocalImport(p) {
return errorf("is a local import path")
}
if path.Clean(p) != p {
return errorf("is not clean")
}
return nil
}
// fixVersion returns a modfile.VersionFixer implemented using the Query function.
//
// It resolves commit hashes and branch names to versions,
// canonicalizes versions that appeared in early vgo drafts,
// and does nothing for versions that already appear to be canonical.
//
// The VersionFixer sets 'fixed' if it ever returns a non-canonical version.
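// For example, a requirement recorded as "example.com/m master" may be resolved
// to a tagged release or to a pseudo-version such as
// "v0.0.0-20200101000000-abcdef012345" (illustrative values).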
func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
return func(path, vers string) (resolved string, err error) {
defer func() {
if err == nil && resolved != vers {
*fixed = true
}
}()
// Special case: remove the old -gopkgin- hack.
if strings.HasPrefix(path, "gopkg.in/") && strings.Contains(vers, "-gopkgin-") {
vers = vers[strings.Index(vers, "-gopkgin-")+len("-gopkgin-"):]
}
// fixVersion is called speculatively on every
// module, version pair from every go.mod file.
// Avoid the query if it looks OK.
_, pathMajor, ok := module.SplitPathVersion(path)
if !ok {
return "", &module.ModuleError{
Path: path,
Err: &module.InvalidVersionError{
Version: vers,
Err: fmt.Errorf("malformed module path %q", path),
},
}
}
if vers != "" && module.CanonicalVersion(vers) == vers {
if err := module.CheckPathMajor(vers, pathMajor); err == nil {
return vers, nil
}
}
info, err := Query(ctx, path, vers, "", nil)
if err != nil {
return "", err
}
return info.Version, nil
}
}
// AllowMissingModuleImports allows import paths to be resolved to modules
// when there is no module root. Normally, this is forbidden because it's slow
// and there's no way to make the result reproducible, but some commands
// like 'go get' are expected to do this.
func AllowMissingModuleImports() {
allowMissingModuleImports = true
}
// modFileToBuildList initializes buildList from the modFile.
func modFileToBuildList() {
Target = modFile.Module.Mod
targetPrefix = Target.Path
if rel := search.InDir(base.Cwd, cfg.GOROOTsrc); rel != "" {
targetInGorootSrc = true
if Target.Path == "std" {
targetPrefix = ""
}
}
list := []module.Version{Target}
for _, r := range modFile.Require {
if index != nil && index.exclude[r.Mod] {
if cfg.BuildMod == "mod" {
fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
} else {
fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
}
} else {
list = append(list, r.Mod)
}
}
buildList = list
}
// setDefaultBuildMod sets a default value for cfg.BuildMod
// if it is currently empty.
func setDefaultBuildMod() {
if cfg.BuildModExplicit {
// Don't override an explicit '-mod=' argument.
return
}
if cfg.CmdName == "get" || strings.HasPrefix(cfg.CmdName, "mod ") {
// 'get' and 'go mod' commands may update go.mod automatically.
// TODO(jayconrod): should this be narrower? Should 'go mod download' or
// 'go mod graph' update go.mod by default?
cfg.BuildMod = "mod"
return
}
if modRoot == "" {
cfg.BuildMod = "readonly"
return
}
if fi, err := os.Stat(filepath.Join(modRoot, "vendor")); err == nil && fi.IsDir() {
modGo := "unspecified"
if index.goVersionV != "" {
if semver.Compare(index.goVersionV, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
cfg.BuildMod = "vendor"
cfg.BuildModReason = "Go version in go.mod is at least 1.14 and vendor directory exists."
return
} else {
modGo = index.goVersionV[1:]
}
}
// Since a vendor directory exists, we should record why we didn't use it.
// This message won't normally be shown, but it may appear with import errors.
cfg.BuildModReason = fmt.Sprintf("Go version in go.mod is %s, so vendor directory was not used.", modGo)
}
cfg.BuildMod = "readonly"
}
func legacyModInit() {
if modFile == nil {
path, err := findModulePath(modRoot)
if err != nil {
base.Fatalf("go: %v", err)
}
fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", path)
modFile = new(modfile.File)
modFile.AddModuleStmt(path)
addGoStmt() // Add the go directive before converted module requirements.
}
for _, name := range altConfigs {
cfg := filepath.Join(modRoot, name)
data, err := ioutil.ReadFile(cfg)
if err == nil {
convert := modconv.Converters[name]
if convert == nil {
return
}
fmt.Fprintf(os.Stderr, "go: copying requirements from %s\n", base.ShortPath(cfg))
cfg = filepath.ToSlash(cfg)
if err := modconv.ConvertLegacyConfig(modFile, cfg, data); err != nil {
base.Fatalf("go: %v", err)
}
if len(modFile.Syntax.Stmt) == 1 {
// Add comment to avoid re-converting every time it runs.
modFile.AddComment("// go: no requirements found in " + name)
}
return
}
}
}
// addGoStmt adds a go directive to the go.mod file if it does not already include one.
// The 'go' version added, if any, is the latest version supported by this toolchain.
func addGoStmt() {
if modFile.Go != nil && modFile.Go.Version != "" {
return
}
tags := build.Default.ReleaseTags
version := tags[len(tags)-1]
if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) {
base.Fatalf("go: unrecognized default version %q", version)
}
if err := modFile.AddGoStmt(version[2:]); err != nil {
base.Fatalf("go: internal error: %v", err)
}
}
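// altConfigs lists configuration files left behind by pre-modules dependency
// tools (dep, godep, glide, govendor, and others) and VCS checkouts; they are
// used to seed 'go mod init' conversion and to suggest a module root in errors.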
var altConfigs = []string{
"Gopkg.lock",
"GLOCKFILE",
"Godeps/Godeps.json",
"dependencies.tsv",
"glide.lock",
"vendor.conf",
"vendor.yml",
"vendor/manifest",
"vendor/vendor.json",
".git/config",
}
func findModuleRoot(dir string) (root string) {
if dir == "" {
panic("dir not set")
}
dir = filepath.Clean(dir)
// Look for enclosing go.mod.
for {
if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
return dir
}
d := filepath.Dir(dir)
if d == dir {
break
}
dir = d
}
return ""
}
func findAltConfig(dir string) (root, name string) {
if dir == "" {
panic("dir not set")
}
dir = filepath.Clean(dir)
if rel := search.InDir(dir, cfg.BuildContext.GOROOT); rel != "" {
// Don't suggest creating a module from $GOROOT/.git/config
// or a config file found in any parent of $GOROOT (see #34191).
return "", ""
}
for {
for _, name := range altConfigs {
if fi, err := os.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() {
return dir, name
}
}
d := filepath.Dir(dir)
if d == dir {
break
}
dir = d
}
return "", ""
}
func findModulePath(dir string) (string, error) {
if CmdModModule != "" {
// Running go mod init x/y/z; return x/y/z.
if err := module.CheckImportPath(CmdModModule); err != nil {
return "", err
}
return CmdModModule, nil
}
// TODO(bcmills): once we have located a plausible module path, we should
// query version control (if available) to verify that it matches the major
// version of the most recent tag.
// See https://golang.org/issue/29433, https://golang.org/issue/27009, and
// https://golang.org/issue/31549.
// Cast about for import comments,
// first in top-level directory, then in subdirectories.
list, _ := ioutil.ReadDir(dir)
for _, info := range list {
if info.Mode().IsRegular() && strings.HasSuffix(info.Name(), ".go") {
if com := findImportComment(filepath.Join(dir, info.Name())); com != "" {
return com, nil
}
}
}
for _, info1 := range list {
if info1.IsDir() {
files, _ := ioutil.ReadDir(filepath.Join(dir, info1.Name()))
for _, info2 := range files {
if info2.Mode().IsRegular() && strings.HasSuffix(info2.Name(), ".go") {
if com := findImportComment(filepath.Join(dir, info1.Name(), info2.Name())); com != "" {
return path.Dir(com), nil
}
}
}
}
}
// Look for Godeps.json declaring import path.
data, _ := ioutil.ReadFile(filepath.Join(dir, "Godeps/Godeps.json"))
var cfg1 struct{ ImportPath string }
json.Unmarshal(data, &cfg1)
if cfg1.ImportPath != "" {
return cfg1.ImportPath, nil
}
// Look for vendor.json declaring import path.
data, _ = ioutil.ReadFile(filepath.Join(dir, "vendor/vendor.json"))
var cfg2 struct{ RootPath string }
json.Unmarshal(data, &cfg2)
if cfg2.RootPath != "" {
return cfg2.RootPath, nil
}
// Look for path in GOPATH.
var badPathErr error
for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) {
if gpdir == "" {
continue
}
if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." {
path := filepath.ToSlash(rel)
// TODO(matloob): replace this with module.CheckImportPath
// once it's been laxened.
// Only checkModulePathLax here. There are some unpublishable
// module names that are compatible with checkModulePathLax
// but they already work in GOPATH so don't break users
// trying to do a build with modules. gorelease will alert users
// publishing their modules to fix their paths.
if err := checkModulePathLax(path); err != nil {
badPathErr = err
break
}
return path, nil
}
}
reason := "outside GOPATH, module path must be specified"
if badPathErr != nil {
// return a different error message if the module was in GOPATH, but
// the module path determined above would be an invalid path.
reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr)
}
msg := `cannot determine module path for source directory %s (%s)
Example usage:
'go mod init example.com/m' to initialize a v0 or v1 module
'go mod init example.com/m/v2' to initialize a v2 module
Run 'go help mod init' for more information.
`
return "", fmt.Errorf(msg, dir, reason)
}
var (
importCommentRE = lazyregexp.New(`(?m)^package[ \t]+[^ \t\r\n/]+[ \t]+//[ \t]+import[ \t]+(\"[^"]+\")[ \t]*\r?\n`)
)
func findImportComment(file string) string {
data, err := ioutil.ReadFile(file)
if err != nil {
return ""
}
m := importCommentRE.FindSubmatch(data)
if m == nil {
return ""
}
path, err := strconv.Unquote(string(m[1]))
if err != nil {
return ""
}
return path
}
var allowWriteGoMod = true
// DisallowWriteGoMod causes future calls to WriteGoMod to do nothing at all.
func DisallowWriteGoMod() {
allowWriteGoMod = false
}
// AllowWriteGoMod undoes the effect of DisallowWriteGoMod:
// future calls to WriteGoMod will update go.mod if needed.
// Note that any past calls have been discarded, so typically
// a call to AllowWriteGoMod should be followed by a call to WriteGoMod.
func AllowWriteGoMod() {
allowWriteGoMod = true
}
// MinReqs returns a Reqs with minimal additional dependencies of Target,
// as will be written to go.mod.
func MinReqs() mvs.Reqs {
var retain []string
for _, m := range buildList[1:] {
_, explicit := index.require[m]
if explicit || loaded.direct[m.Path] {
retain = append(retain, m.Path)
}
}
min, err := mvs.Req(Target, retain, Reqs())
if err != nil {
base.Fatalf("go: %v", err)
}
return &mvsReqs{buildList: append([]module.Version{Target}, min...)}
}
// WriteGoMod writes the current build list back to go.mod.
func WriteGoMod() {
// If we're using -mod=vendor we basically ignored
// go.mod, so definitely don't try to write back our
// incomplete view of the world.
if !allowWriteGoMod || cfg.BuildMod == "vendor" {
return
}
// If we aren't in a module, we don't have anywhere to write a go.mod file.
if modRoot == "" {
return
}
if cfg.BuildMod != "readonly" {
addGoStmt()
}
if loaded != nil {
reqs := MinReqs()
min, err := reqs.Required(Target)
if err != nil {
base.Fatalf("go: %v", err)
}
var list []*modfile.Require
for _, m := range min {
list = append(list, &modfile.Require{
Mod: m,
Indirect: !loaded.direct[m.Path],
})
}
modFile.SetRequire(list)
}
modFile.Cleanup()
dirty := index.modFileIsDirty(modFile)
if dirty && cfg.BuildMod == "readonly" {
// If we're about to fail due to -mod=readonly,
// prefer to report a dirty go.mod over a dirty go.sum
if cfg.BuildModExplicit {
base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly")
} else if cfg.BuildModReason != "" {
base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly\n\t(%s)", cfg.BuildModReason)
} else {
base.Fatalf("go: updates to go.mod needed; try 'go mod tidy' first")
}
}
if !dirty && cfg.CmdName != "mod tidy" {
// The go.mod file has the same semantic content that it had before
// (but not necessarily the same exact bytes).
// Don't write go.mod, but write go.sum in case we added or trimmed sums.
modfetch.WriteGoSum(keepSums(true))
return
}
new, err := modFile.Format()
if err != nil {
base.Fatalf("go: %v", err)
}
defer func() {
// At this point we have determined to make the go.mod file on disk equal to new.
index = indexModFile(new, modFile, false)
// Update go.sum after releasing the side lock and refreshing the index.
modfetch.WriteGoSum(keepSums(true))
}()
// Make a best-effort attempt to acquire the side lock, only to exclude
// previous versions of the 'go' command from making simultaneous edits.
if unlock, err := modfetch.SideLock(); err == nil {
defer unlock()
}
errNoChange := errors.New("no update needed")
err = lockedfile.Transform(ModFilePath(), func(old []byte) ([]byte, error) {
if bytes.Equal(old, new) {
// The go.mod file is already equal to new, possibly as the result of some
// other process.
return nil, errNoChange
}
if index != nil && !bytes.Equal(old, index.data) {
// The contents of the go.mod file have changed. In theory we could add all
// of the new modules to the build list, recompute, and check whether any
// module in *our* build list got bumped to a different version, but that's
// a lot of work for marginal benefit. Instead, fail the command: if users
// want to run concurrent commands, they need to start with a complete,
// consistent module definition.
return nil, fmt.Errorf("existing contents have changed since last read")
}
return new, nil
})
if err != nil && err != errNoChange {
base.Fatalf("go: updating go.mod: %v", err)
}
}
// keepSums returns a set of module sums to preserve in go.sum. The set
// includes entries for all modules used to load packages (according to
// the last load function such as LoadPackages or ImportFromFiles).
// It also contains entries for go.mod files needed for MVS (the version
// of these entries ends with "/go.mod").
//
// If addDirect is true, the set also includes sums for modules directly
// required by go.mod, as represented by the index, with replacements applied.
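// For example, an entry kept only for MVS is stored under the key
// {Path: m.Path, Version: m.Version + "/go.mod"} (see modkey below).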
func keepSums(addDirect bool) map[module.Version]bool {
// Walk the module graph and keep sums needed by MVS.
modkey := func(m module.Version) module.Version {
return module.Version{Path: m.Path, Version: m.Version + "/go.mod"}
}
keep := make(map[module.Version]bool)
replaced := make(map[module.Version]bool)
reqs := Reqs()
var walk func(module.Version)
walk = func(m module.Version) {
// If we build using a replacement module, keep the sum for the replacement,
// since that's the code we'll actually use during a build.
r := Replacement(m)
if r.Path == "" {
keep[modkey(m)] = true
} else {
replaced[m] = true
keep[modkey(r)] = true
}
list, _ := reqs.Required(m)
for _, r := range list {
if !keep[modkey(r)] && !replaced[r] {
walk(r)
}
}
}
walk(Target)
// Add entries for modules from which packages were loaded.
if loaded != nil {
for _, pkg := range loaded.pkgs {
m := pkg.mod
if r := Replacement(m); r.Path != "" {
keep[r] = true
} else {
keep[m] = true
}
}
}
// Add entries for modules directly required by go.mod.
if addDirect {
for m := range index.require {
var kept module.Version
if r := Replacement(m); r.Path != "" {
kept = r
} else {
kept = m
}
keep[kept] = true
keep[module.Version{Path: kept.Path, Version: kept.Version + "/go.mod"}] = true
}
}
return keep
}
func TrimGoSum() {
// Don't retain sums for direct requirements in go.mod. When TrimGoSum is
// called, go.mod has not been updated, and it may contain requirements on
// modules deleted from the build list.
addDirect := false
modfetch.TrimGoSum(keepSums(addDirect))
}
|
[
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\""
] |
[] |
[
"GIT_SSH",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT"
] |
[]
|
["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"]
|
go
| 3 | 0 | |
utils.go
|
package alipay
import (
"fmt"
"os"
"os/exec"
"runtime"
)
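// getEnv returns the value of the `env` environment variable, defaulting to
// "local" when it is unset.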
func getEnv() string {
env := os.Getenv("env")
if env == "" {
env = "local"
}
return env
}
func showUrl(url string) error {
switch runtime.GOOS {
case "linux":
return exec.Command("xdg-open", url).Start()
case "windows":
return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
case "darwin":
return exec.Command("open", url).Start()
default:
return fmt.Errorf("unsupported platform")
}
}
|
[
"\"env\""
] |
[] |
[
"env"
] |
[]
|
["env"]
|
go
| 1 | 0 | |
test/e2e/certificate_gce.go
|
/*
Copyright The Voyager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
api "github.com/appscode/voyager/apis/voyager/v1beta1"
"github.com/appscode/voyager/pkg/certificate"
"github.com/appscode/voyager/test/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
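// These tests exercise DNS-01 challenges against Let's Encrypt staging using the
// googlecloud provider; they read TEST_GCE_PROJECT, TEST_GCE_SERVICE_ACCOUNT_DATA
// and TEST_DNS_DOMAINS from the environment and are skipped when the credentials
// are not provided.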
var _ = Describe("CertificateWithDNSProvider", func() {
var (
f *framework.Invocation
cert *api.Certificate
userSecret *core.Secret
credentialSecret *core.Secret
)
BeforeEach(func() {
f = root.Invoke()
skipTestIfSecretNotProvided()
if !options.TestCertificate {
Skip("Certificate Test is not enabled")
}
userSecret = &core.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "user-" + f.Certificate.UniqueName(),
Namespace: f.Namespace(),
},
Data: map[string][]byte{
api.ACMEUserEmail: []byte("[email protected]"),
api.ACMEServerURL: []byte(certificate.LetsEncryptStagingURL),
},
}
_, err := f.KubeClient.CoreV1().Secrets(userSecret.Namespace).Create(userSecret)
Expect(err).NotTo(HaveOccurred())
})
BeforeEach(func() {
f = root.Invoke()
fmt.Println("TEST_GCE_PROJECT", os.Getenv("TEST_GCE_PROJECT"))
fmt.Println("TEST_DNS_DOMAINS", os.Getenv("TEST_DNS_DOMAINS"))
credentialSecret = &core.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cred-" + f.Certificate.UniqueName(),
Namespace: f.Namespace(),
},
Data: map[string][]byte{
"GCE_PROJECT": []byte(os.Getenv("TEST_GCE_PROJECT")),
"GCE_SERVICE_ACCOUNT_DATA": []byte(os.Getenv("TEST_GCE_SERVICE_ACCOUNT_DATA")),
},
}
_, err := f.KubeClient.CoreV1().Secrets(credentialSecret.Namespace).Create(credentialSecret)
Expect(err).NotTo(HaveOccurred())
})
BeforeEach(func() {
cert = f.Certificate.GetSkeleton()
cert.Spec = api.CertificateSpec{
Domains: []string{os.Getenv("TEST_DNS_DOMAINS")},
ChallengeProvider: api.ChallengeProvider{
DNS: &api.DNSChallengeProvider{
Provider: "googlecloud",
CredentialSecretName: credentialSecret.Name,
},
},
ACMEUserSecretName: userSecret.Name,
Storage: api.CertificateStorage{
Secret: &core.LocalObjectReference{},
},
}
})
JustBeforeEach(func() {
By("Creating certificate with" + cert.Name)
err := f.Certificate.Create(cert)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
if options.Cleanup {
Expect(f.KubeClient.CoreV1().Secrets(userSecret.Namespace).Delete(userSecret.Name, &metav1.DeleteOptions{})).NotTo(HaveOccurred())
Expect(f.KubeClient.CoreV1().Secrets(credentialSecret.Namespace).Delete(credentialSecret.Name, &metav1.DeleteOptions{})).NotTo(HaveOccurred())
}
})
Describe("Create", func() {
It("Should check secret", func() {
Eventually(func() bool {
secret, err := f.KubeClient.CoreV1().Secrets(cert.Namespace).Get(cert.SecretName(), metav1.GetOptions{})
if err != nil {
return false
}
if _, ok := secret.Data["tls.crt"]; !ok {
return false
}
return true
}, "20m", "10s").Should(BeTrue())
})
})
})
func skipTestIfSecretNotProvided() {
if len(os.Getenv("TEST_GCE_PROJECT")) == 0 ||
len(os.Getenv("TEST_GCE_SERVICE_ACCOUNT_DATA")) == 0 {
Skip("Skipping Test, Secret Not Provided")
}
}
|
[
"\"TEST_GCE_PROJECT\"",
"\"TEST_DNS_DOMAINS\"",
"\"TEST_GCE_PROJECT\"",
"\"TEST_GCE_SERVICE_ACCOUNT_DATA\"",
"\"TEST_DNS_DOMAINS\"",
"\"TEST_GCE_PROJECT\"",
"\"TEST_GCE_SERVICE_ACCOUNT_DATA\""
] |
[] |
[
"TEST_GCE_SERVICE_ACCOUNT_DATA",
"TEST_DNS_DOMAINS",
"TEST_GCE_PROJECT"
] |
[]
|
["TEST_GCE_SERVICE_ACCOUNT_DATA", "TEST_DNS_DOMAINS", "TEST_GCE_PROJECT"]
|
go
| 3 | 0 | |
core/os/device/remotessh/device.go
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remotessh
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"github.com/golang/protobuf/jsonpb"
"github.com/google/gapid/core/app/layout"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/core/os/device/bind"
"github.com/google/gapid/core/os/shell"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/knownhosts"
)
// Device extends the bind.Device interface with capabilities specific to
// remote SSH clients
type Device interface {
bind.Device
// PushFile will transfer the local file at sourcePath to the remote
// machine at destPath
PushFile(ctx context.Context, sourcePath, destPath string) error
// MakeTempDir makes a temporary directory, and returns the
// path, as well as a function to call to clean it up.
MakeTempDir(ctx context.Context) (string, func(ctx context.Context), error)
// WriteFile writes the given file into the given location on the remote device
WriteFile(ctx context.Context, contents io.Reader, mode os.FileMode, destPath string) error
// DefaultReplayCacheDir returns the default path for replay resource caches
DefaultReplayCacheDir() string
}
// MaxNumberOfSSHConnections defines the max number of ssh connections to each
// ssh remote device that can be used to run commands concurrently.
const MaxNumberOfSSHConnections = 15
// binding represents an attached SSH client.
type binding struct {
bind.Simple
connection *ssh.Client
configuration *Configuration
env *shell.Env
// We duplicate OS here because we need to use it
// before we get the rest of the information
os device.OSKind
// pool to limit the maximum number of connections
ch chan int
}
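// pooledSession pairs an ssh.Session with the semaphore channel in which it
// holds a slot; kill and wait release that slot so other commands may run.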
type pooledSession struct {
ch chan int
session *ssh.Session
}
func (p *pooledSession) kill() error {
// Release this session's semaphore slot (non-blocking, in case it was
// already released) before signalling the remote process.
select {
case <-p.ch:
default:
}
return p.session.Signal(ssh.SIGSEGV)
}
func (p *pooledSession) wait() error {
ret := p.session.Wait()
select {
case <-p.ch:
default:
}
return ret
}
func newBinding(conn *ssh.Client, conf *Configuration, env *shell.Env) *binding {
b := &binding{
connection: conn,
configuration: conf,
env: env,
ch: make(chan int, MaxNumberOfSSHConnections),
Simple: bind.Simple{
To: &device.Instance{
Serial: "",
Configuration: &device.Configuration{},
},
LastStatus: bind.Status_Online,
},
}
return b
}
func (b *binding) newPooledSession() (*pooledSession, error) {
b.ch <- int(0)
session, err := b.connection.NewSession()
if err != nil {
<-b.ch
err = fmt.Errorf("New SSH Session Error: %v, Current maximum number of ssh connections GAPID can issue to each remote device is: %v", err, MaxNumberOfSSHConnections)
return nil, err
}
return &pooledSession{
ch: b.ch,
session: session,
}, nil
}
var _ Device = &binding{}
// Devices returns the list of reachable SSH devices.
func Devices(ctx context.Context, configuration io.Reader) ([]bind.Device, error) {
configurations, err := ReadConfigurations(configuration)
if err != nil {
return nil, err
}
devices := make([]bind.Device, 0, len(configurations))
for _, cfg := range configurations {
if device, err := GetConnectedDevice(ctx, cfg); err == nil {
devices = append(devices, device)
}
}
return devices, nil
}
// getSSHAgent returns a connection to a local SSH agent, if one exists.
func getSSHAgent() ssh.AuthMethod {
if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
}
return nil
}
// This returns an SSH auth for the given private key.
// It will fail if the private key was encrypted.
func getPrivateKeyAuth(path string) (ssh.AuthMethod, error) {
bytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
signer, err := ssh.ParsePrivateKey(bytes)
if err != nil {
return nil, err
}
return ssh.PublicKeys(signer), nil
}
// GetConnectedDevice returns a device that matches the given configuration.
func GetConnectedDevice(ctx context.Context, c Configuration) (Device, error) {
auths := []ssh.AuthMethod{}
if c.Keyfile != "" {
// This returns an SSH auth for the given private key.
// It will fail if the private key was encrypted.
if auth, err := getPrivateKeyAuth(c.Keyfile); err == nil {
auths = append(auths, auth)
}
}
if agent := getSSHAgent(); agent != nil {
auths = append(auths, agent)
}
if len(auths) == 0 {
return nil, log.Errf(ctx, nil, "No valid authentication method for SSH connection %s", c.Name)
}
hosts, err := knownhosts.New(c.KnownHosts)
if err != nil {
return nil, log.Errf(ctx, err, "Could not read known hosts")
}
sshConfig := &ssh.ClientConfig{
User: c.User,
Auth: auths,
HostKeyCallback: hosts,
}
connection, err := ssh.Dial("tcp", fmt.Sprintf("%s:%d", c.Host, c.Port), sshConfig)
if err != nil {
return nil, log.Errf(ctx, err, "Dial tcp: %s:%d with sshConfig: %v failed", c.Host, c.Port, sshConfig)
}
env := shell.NewEnv()
for _, e := range c.Env {
env.Add(e)
}
b := newBinding(connection, &c, env)
kind := device.UnknownOS
// Try to get the OS string for Mac/Linux
if osName, err := b.Shell("uname", "-a").Call(ctx); err == nil {
if strings.Contains(osName, "Darwin") {
kind = device.OSX
} else if strings.Contains(osName, "Linux") {
kind = device.Linux
}
}
if kind == device.UnknownOS {
// Try to get the OS string for Windows
if osName, err := b.Shell("ver").Call(ctx); err == nil {
if strings.Contains(osName, "Windows") {
kind = device.Windows
}
}
}
if kind == device.UnknownOS {
return nil, log.Errf(ctx, nil, "Could not determine unix type")
}
b.os = kind
dir, cleanup, err := b.MakeTempDir(ctx)
if err != nil {
return nil, log.Errf(ctx, err, "Could not make temporary directory")
}
defer cleanup(ctx)
localDeviceInfo, err := layout.DeviceInfo(ctx, b.os)
if err != nil {
return nil, log.Errf(ctx, err, "Could not get device info")
}
if err = b.PushFile(ctx, localDeviceInfo.System(), dir+"/device-info"); err != nil {
return nil, log.Errf(ctx, err, "Error running: './device-info'")
}
stderr := bytes.Buffer{}
stdout := bytes.Buffer{}
err = b.Shell("./device-info").In(dir).Capture(&stdout, &stderr).Run(ctx)
if err != nil {
return nil, err
}
if stderr.String() != "" {
log.W(ctx, "Deviceinfo succeeded, but returned error string %s", stderr.String())
}
devInfo := stdout.String()
var device device.Instance
if err := jsonpb.Unmarshal(bytes.NewReader([]byte(devInfo)), &device); err != nil {
panic(err)
}
device.Name = c.Name
device.GenID()
for i := range device.ID.Data {
// Flip some bits, since if you have a local & ssh device
// they would otherwise be the same
device.ID.Data[i] = 0x10 ^ device.ID.Data[i]
}
b.To = &device
return b, nil
}
// DefaultReplayCacheDir implements Device interface
func (b *binding) DefaultReplayCacheDir() string {
return ""
}
|
[
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AUTH_SOCK"]
|
go
| 1 | 0 | |
tests/support/runtests.py
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio ([email protected])
.. _runtime_vars:
Runtime Variables
-----------------
:command:`salt-runtests` provides a variable, :py:attr:`RUNTIME_VARS` which has some common paths defined at
startup:
.. autoattribute:: tests.support.runtests.RUNTIME_VARS
:annotation:
:TMP: Tests suite temporary directory
:TMP_CONF_DIR: Configuration directory from where the daemons that :command:`salt-runtests` starts get their
configuration files.
:TMP_CONF_MASTER_INCLUDES: Salt Master configuration files includes directory. See
:salt_conf_master:`default_include`.
:TMP_CONF_MINION_INCLUDES: Salt Minion configuration files includes directory. See
:salt_conf_minion:`include`.
:TMP_CONF_CLOUD_INCLUDES: Salt cloud configuration files includes directory. The same as the salt master and
minion includes configuration, though under a different directory name.
:TMP_CONF_CLOUD_PROFILE_INCLUDES: Salt cloud profiles configuration files includes directory. Same as above.
:TMP_CONF_CLOUD_PROVIDER_INCLUDES: Salt cloud providers configuration files includes directory. Same as above.
:TMP_SCRIPT_DIR: Temporary scripts directory from where the Salt CLI tools will be called when running tests.
:TMP_SALT_INTEGRATION_FILES: Temporary directory from where Salt's test suite integration files are copied to.
:TMP_BASEENV_STATE_TREE: Salt master's **base** environment state tree directory
:TMP_PRODENV_STATE_TREE: Salt master's **production** environment state tree directory
:TMP_BASEENV_PILLAR_TREE: Salt master's **base** environment pillar tree directory
:TMP_PRODENV_PILLAR_TREE: Salt master's **production** environment pillar tree directory
Use it in your test cases whenever you need one of these paths. It is as simple as:
.. code-block:: python
import os
from tests.support.runtests import RUNTIME_VARS
# Path to the testing minion configuration file
minion_config_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion')
.. _`pytest`: http://pytest.org
.. _`nose`: https://nose.readthedocs.org
"""
from __future__ import absolute_import, print_function
import logging
import os
import shutil
import salt.utils.path
import salt.utils.platform
import tests.support.paths as paths
from salt.ext import six
try:
import pwd
except ImportError:
import salt.utils.win_functions
log = logging.getLogger(__name__)
def this_user():
"""
Get the user associated with the current process.
"""
if salt.utils.platform.is_windows():
return salt.utils.win_functions.get_current_user(with_domain=False)
return pwd.getpwuid(os.getuid())[0]
class RootsDict(dict):
def merge(self, data):
for key, values in six.iteritems(data):
if key not in self:
self[key] = values
continue
for value in values:
if value not in self[key]:
self[key].append(value)
return self
def to_dict(self):
return dict(self)
def recursive_copytree(source, destination, overwrite=False):
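"""Recursively copy `source` into `destination`, creating missing directories
and copying a file only when the destination is absent, `overwrite` is set, or
the source file is newer than the existing destination file."""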
for root, dirs, files in os.walk(source):
for item in dirs:
src_path = os.path.join(root, item)
dst_path = os.path.join(
destination, src_path.replace(source, "").lstrip(os.sep)
)
if not os.path.exists(dst_path):
log.debug("Creating directory: %s", dst_path)
os.makedirs(dst_path)
for item in files:
src_path = os.path.join(root, item)
dst_path = os.path.join(
destination, src_path.replace(source, "").lstrip(os.sep)
)
if os.path.exists(dst_path) and not overwrite:
if os.stat(src_path).st_mtime > os.stat(dst_path).st_mtime:
log.debug("Copying %s to %s", src_path, dst_path)
shutil.copy2(src_path, dst_path)
else:
if not os.path.isdir(os.path.dirname(dst_path)):
log.debug("Creating directory: %s", os.path.dirname(dst_path))
os.makedirs(os.path.dirname(dst_path))
log.debug("Copying %s to %s", src_path, dst_path)
shutil.copy2(src_path, dst_path)
class RuntimeVars(object):
__self_attributes__ = ("_vars", "_locked", "lock")
def __init__(self, **kwargs):
self._vars = kwargs
self._locked = False
def lock(self):
# Late import
from salt.utils.immutabletypes import freeze
frozen_vars = freeze(self._vars.copy())
self._vars = frozen_vars
self._locked = True
def __iter__(self):
for name, value in six.iteritems(self._vars):
yield name, value
def __getattribute__(self, name):
if name in object.__getattribute__(self, "_vars"):
return object.__getattribute__(self, "_vars")[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if getattr(self, "_locked", False) is True:
raise RuntimeError(
"After {0} is locked, no additional data can be added to it".format(
self.__class__.__name__
)
)
if name in object.__getattribute__(self, "__self_attributes__"):
object.__setattr__(self, name, value)
return
self._vars[name] = value
# <---- Helper Methods -----------------------------------------------------------------------------------------------
# ----- Global Variables -------------------------------------------------------------------------------------------->
XML_OUTPUT_DIR = os.environ.get(
"SALT_XML_TEST_REPORTS_DIR", os.path.join(paths.TMP, "xml-test-reports")
)
# <---- Global Variables ---------------------------------------------------------------------------------------------
# ----- Tests Runtime Variables ------------------------------------------------------------------------------------->
RUNTIME_VARS = RuntimeVars(
TMP=paths.TMP,
SYS_TMP_DIR=paths.SYS_TMP_DIR,
FILES=paths.FILES,
CONF_DIR=paths.CONF_DIR,
PILLAR_DIR=paths.PILLAR_DIR,
ENGINES_DIR=paths.ENGINES_DIR,
LOG_HANDLERS_DIR=paths.LOG_HANDLERS_DIR,
TMP_ROOT_DIR=paths.TMP_ROOT_DIR,
TMP_CONF_DIR=paths.TMP_CONF_DIR,
TMP_MINION_CONF_DIR=paths.TMP_MINION_CONF_DIR,
TMP_CONF_MASTER_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "master.d"),
TMP_CONF_MINION_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "minion.d"),
TMP_CONF_PROXY_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "proxy.d"),
TMP_CONF_CLOUD_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "cloud.conf.d"),
TMP_CONF_CLOUD_PROFILE_INCLUDES=os.path.join(
paths.TMP_CONF_DIR, "cloud.profiles.d"
),
TMP_CONF_CLOUD_PROVIDER_INCLUDES=os.path.join(
paths.TMP_CONF_DIR, "cloud.providers.d"
),
TMP_SUB_MINION_CONF_DIR=paths.TMP_SUB_MINION_CONF_DIR,
TMP_SYNDIC_MASTER_CONF_DIR=paths.TMP_SYNDIC_MASTER_CONF_DIR,
TMP_SYNDIC_MINION_CONF_DIR=paths.TMP_SYNDIC_MINION_CONF_DIR,
TMP_PROXY_CONF_DIR=paths.TMP_PROXY_CONF_DIR,
TMP_MM_CONF_DIR=paths.TMP_MM_CONF_DIR,
TMP_MM_MINION_CONF_DIR=paths.TMP_MM_MINION_CONF_DIR,
TMP_MM_SUB_CONF_DIR=paths.TMP_MM_SUB_CONF_DIR,
TMP_MM_SUB_MINION_CONF_DIR=paths.TMP_MM_SUB_CONF_DIR,
TMP_SCRIPT_DIR=paths.TMP_SCRIPT_DIR,
TMP_STATE_TREE=paths.TMP_STATE_TREE,
TMP_BASEENV_STATE_TREE=paths.TMP_STATE_TREE,
TMP_PILLAR_TREE=paths.TMP_PILLAR_TREE,
TMP_BASEENV_PILLAR_TREE=paths.TMP_PILLAR_TREE,
TMP_PRODENV_STATE_TREE=paths.TMP_PRODENV_STATE_TREE,
TMP_PRODENV_PILLAR_TREE=paths.TMP_PRODENV_PILLAR_TREE,
SHELL_TRUE_PATH=salt.utils.path.which("true")
if not salt.utils.platform.is_windows()
else "cmd /c exit 0 > nul",
SHELL_FALSE_PATH=salt.utils.path.which("false")
if not salt.utils.platform.is_windows()
else "cmd /c exit 1 > nul",
RUNNING_TESTS_USER=this_user(),
RUNTIME_CONFIGS={},
CODE_DIR=paths.CODE_DIR,
BASE_FILES=paths.BASE_FILES,
PROD_FILES=paths.PROD_FILES,
TESTS_DIR=paths.TESTS_DIR,
PYTEST_SESSION=False,
)
# <---- Tests Runtime Variables --------------------------------------------------------------------------------------
|
[] |
[] |
[
"SALT_XML_TEST_REPORTS_DIR"
] |
[]
|
["SALT_XML_TEST_REPORTS_DIR"]
|
python
| 1 | 0 | |
handler.py
|
import json
import os
import sys
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "./vendored"))
import requests
from googletrans import Translator
TOKEN = os.environ['TELEGRAM_TOKEN']
BASE_URL = "https://api.telegram.org/bot{}".format(TOKEN)
def translate(text):
translator = Translator()
language = translator.detect(text)
if language.lang == "ru":
result_translate = translator.translate(text, dest='en')
elif language.lang == "en":
result_translate = translator.translate(text, dest='ru')
else:
return "Incorrect request"
return result_translate.text
def send_message(chat_id, text):
url = BASE_URL + "/sendMessage"
data = {
"chat_id": chat_id,
"text": text
}
request = requests.post(url, data = data)
def entry_point(event, context):
data = json.loads(event["body"])
translate_text = translate(data["message"]["text"])
send_message(data["message"]["from"]["id"], translate_text)
return {"statusCode": 200}
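# Illustrative event shape (values are made up): event["body"] is a JSON string
# such as '{"message": {"from": {"id": 123456}, "text": "hello"}}'.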
|
[] |
[] |
[
"TELEGRAM_TOKEN"
] |
[]
|
["TELEGRAM_TOKEN"]
|
python
| 1 | 0 | |
run.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Linh Pham
# wwdtm_uniquedates is relased under the terms of the Apache License 2.0
"""Calculates all of the calendar days for each month in which there has been
a Wait Wait Don't Tell Me! show"""
from collections import OrderedDict
import json
import math
import os
import textwrap
from typing import List, Dict
import mysql.connector
MONTHS = ["January", "February", "March", "April",
"May", "June", "July", "August",
"September", "October", "November", "December"
]
MONTH_NUMBER_OF_DAYS = [31, 29, 31, 30,
31, 30, 31, 31,
30, 31, 30, 31
]
def retrieve_all_shows(database_connection: mysql.connector.connect,
max_year: int = 2020) -> Dict:
"""Return two month-keyed dicts of calendar days on which shows aired: one
covering all shows and one covering only regular shows (excluding Best Of
and repeat shows)."""
all_show_dates = OrderedDict()
regular_show_dates = OrderedDict()
for month in range(1, 13, 1):
all_show_dates[month] = []
regular_show_dates[month] = []
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate, s.bestof, s.repeatshowid FROM ww_shows s "
"WHERE YEAR(s.showdate) <= %s "
"ORDER BY s.showdate ASC;")
cursor.execute(query, (max_year,))
result = cursor.fetchall()
if not result:
return None
for show in result:
show_date = show["showdate"]
if show_date.day not in all_show_dates[show_date.month]:
all_show_dates[show_date.month].append(show_date.day)
if not show["bestof"] and not show["repeatshowid"]:
if show_date.day not in regular_show_dates[show_date.month]:
regular_show_dates[show_date.month].append(show_date.day)
for month in all_show_dates:
all_show_dates[month].sort()
for month in regular_show_dates:
regular_show_dates[month].sort()
return all_show_dates, regular_show_dates
def load_config(app_environment) -> Dict:
"""Load configuration file from config.json"""
with open('config.json', 'r') as config_file:
config_dict = json.load(config_file)
return config_dict
def main():
"""Pull in scoring data and generate stats based on the data"""
app_environment = os.getenv("APP_ENV", "local").strip().lower()
config = load_config(app_environment)
database_connection = mysql.connector.connect(**config["database"])
all_show_dates, regular_show_dates = retrieve_all_shows(database_connection)
print("All Shows")
print("=========\n")
for month in all_show_dates:
print(MONTHS[month - 1])
print("")
days = " ".join("{:<3}".format(day) for day in all_show_dates[month])
for line in textwrap.wrap(days, 30):
print(line)
print("")
print("Days in {}: {}".format(MONTHS[month - 1],
MONTH_NUMBER_OF_DAYS[month - 1]))
print("Show days in {}: {}\n".format(MONTHS[month -1],
len(all_show_dates[month])))
print("")
print("Regular Shows")
print("=============\n")
for month in regular_show_dates:
print(MONTHS[month - 1])
print("")
days = " ".join("{:<3}".format(day) for day in regular_show_dates[month])
for line in textwrap.wrap(days, 30):
print(line)
print("")
print("Days in {}: {}".format(MONTHS[month - 1],
MONTH_NUMBER_OF_DAYS[month - 1]))
print("Show days in {}: {}\n".format(MONTHS[month -1],
len(regular_show_dates[month])))
return None
# Only run if executed as a script and not imported
if __name__ == "__main__":
main()
|
[] |
[] |
[
"APP_ENV"
] |
[]
|
["APP_ENV"]
|
python
| 1 | 0 | |
rqalpha/mod/rqalpha_mod_fxdayu_source/utils/quantos.py
|
import os
_api = None
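# Module-level cache: only one DataApi connection is created per process and
# shared by every QuantOsDataApiMixin instance.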
class QuantOsDataApiMixin(object):
def __init__(self, api_url=None, user=None, token=None):
global _api
from jaqs.data import DataApi
if _api is None:
url = api_url or os.environ.get("QUANTOS_URL", "tcp://data.quantos.org:8910")
user = user or os.environ.get("QUANTOS_USER")
token = token or os.environ.get("QUANTOS_TOKEN")
_api = DataApi(addr=url)
_api.login(user, token)
self._api = _api
|
[] |
[] |
[
"QUANTOS_USER",
"QUANTOS_URL",
"QUANTOS_TOKEN"
] |
[]
|
["QUANTOS_USER", "QUANTOS_URL", "QUANTOS_TOKEN"]
|
python
| 3 | 0 | |
storage/s3.go
|
package storage
import (
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
awsSession "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/tokubai/kinu/logger"
)
type S3Storage struct {
Storage
client *s3.S3
region string
bucket string
bucketBasePath string
}
type S3StorageItem struct {
StorageItem
Object *s3.Object
}
func openS3Storage() (Storage, error) {
s := &S3Storage{}
err := s.Open()
if err != nil {
return nil, logger.ErrorDebug(err)
}
return s, nil
}
func (s *S3Storage) Open() error {
s.region = os.Getenv("KINU_S3_REGION")
if len(s.region) == 0 {
return &ErrInvalidStorageOption{Message: "KINU_S3_REGION system env is required"}
}
s.bucket = os.Getenv("KINU_S3_BUCKET")
if len(s.bucket) == 0 {
return &ErrInvalidStorageOption{Message: "KINU_S3_BUCKET system env is required"}
}
s.bucketBasePath = os.Getenv("KINU_S3_BUCKET_BASE_PATH")
s.client = s3.New(awsSession.New(), &aws.Config{Region: aws.String(s.region)})
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"base_path": s.bucketBasePath,
"region": s.region,
}).Debug("open s3 storage")
return nil
}
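// BuildKey prefixes key with the configured bucket base path. For example
// (illustrative values): with bucketBasePath "images", BuildKey("a/b.jpg")
// returns "images/a/b.jpg"; with an empty or "/" base path the key is
// returned unchanged.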
func (s *S3Storage) BuildKey(key string) string {
if s.bucketBasePath == "/" {
return key
} else if len(s.bucketBasePath) == 0 {
return key
} else if strings.HasSuffix(s.bucketBasePath, "/") {
return s.bucketBasePath + key
} else {
return s.bucketBasePath + "/" + key
}
}
func (s *S3Storage) Fetch(key string) (*Object, error) {
key = s.BuildKey(key)
params := &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": key,
}).Debug("start get object from s3")
resp, err := s.client.GetObject(params)
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return nil, ErrImageNotFound
} else if err != nil {
return nil, logger.ErrorDebug(err)
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": key,
}).Debug("found object from s3")
defer resp.Body.Close()
object := &Object{
Metadata: make(map[string]string, 0),
}
for k, v := range resp.Metadata {
object.Metadata[k] = *v
}
object.Body, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, logger.ErrorDebug(err)
}
return object, nil
}
func (s *S3Storage) PutFromBlob(key string, image []byte, contentType string, metadata map[string]string) error {
tmpfile, err := ioutil.TempFile("", "kinu-upload")
if err != nil {
return logger.ErrorDebug(err)
}
_, err = tmpfile.Write(image)
if err != nil {
return logger.ErrorDebug(err)
}
defer func() {
tmpfile.Close()
os.Remove(tmpfile.Name())
}()
return s.Put(key, tmpfile, contentType, metadata)
}
func (s *S3Storage) Put(key string, imageFile io.ReadSeeker, contentType string, metadata map[string]string) error {
putMetadata := make(map[string]*string, 0)
for k, v := range metadata {
putMetadata[k] = aws.String(v)
}
_, err := imageFile.Seek(0, 0)
if err != nil {
return logger.ErrorDebug(err)
}
_, err = s.client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(s.BuildKey(key)),
ContentType: aws.String(contentType),
Body: imageFile,
Metadata: putMetadata,
})
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": s.BuildKey(key),
}).Debug("put to s3")
if err != nil {
return logger.ErrorDebug(err)
}
return nil
}
func (s *S3Storage) List(key string) ([]StorageItem, error) {
resp, err := s.client.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String(s.bucket),
Prefix: aws.String(s.BuildKey(key)),
})
if err != nil {
return nil, logger.ErrorDebug(err)
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": s.BuildKey(key),
}).Debug("start list object from s3")
items := make([]StorageItem, 0)
for _, object := range resp.Contents {
logger.WithFields(logrus.Fields{
"key": &object.Key,
}).Debug("found object")
item := S3StorageItem{Object: object}
items = append(items, &item)
}
return items, nil
}
func (s *S3Storage) Move(from string, to string) error {
fromKey := s.bucket + "/" + from
toKey := s.bucketBasePath + "/" + to
_, err := s.client.CopyObject(&s3.CopyObjectInput{
Bucket: aws.String(s.bucket),
CopySource: aws.String(fromKey),
Key: aws.String(toKey),
})
logger.WithFields(logrus.Fields{
"from": fromKey,
"to": toKey,
}).Debug("move s3 object start")
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return ErrImageNotFound
} else if err != nil {
return logger.ErrorDebug(err)
}
_, err = s.client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(from),
})
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return ErrImageNotFound
} else if err != nil {
return logger.ErrorDebug(err)
}
return nil
}
func (s *S3StorageItem) IsValid() bool {
if len(s.Extension()) == 0 {
return false
}
if len(s.ImageSize()) == 0 {
return false
}
return true
}
func (s *S3StorageItem) Key() string {
return *s.Object.Key
}
func (s *S3StorageItem) Filename() string {
path := strings.Split(s.Key(), "/")
return path[len(path)-1]
}
func (s *S3StorageItem) Extension() string {
path := strings.Split(*s.Object.Key, ".")
return path[len(path)-1]
}
// KeyFormat: :image_type/:id/:id.:size.:format
func (s *S3StorageItem) ImageSize() string {
path := strings.Split(s.Key(), ".")
return path[len(path)-2]
}
|
[
"\"KINU_S3_REGION\"",
"\"KINU_S3_BUCKET\"",
"\"KINU_S3_BUCKET_BASE_PATH\""
] |
[] |
[
"KINU_S3_BUCKET_BASE_PATH",
"KINU_S3_REGION",
"KINU_S3_BUCKET"
] |
[]
|
["KINU_S3_BUCKET_BASE_PATH", "KINU_S3_REGION", "KINU_S3_BUCKET"]
|
go
| 3 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/env python3
# Copyright (c) 2012-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/xcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
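Illustrative (assumed xgettext output): the lines
  msgid "Hello"
  msgstr "Bonjour"
parse to [(['"Hello"'], ['"Bonjour"'])] -- the surrounding quotes are kept.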
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *xcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("xcoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("xcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("xcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("xcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"COPYRIGHT_HOLDERS",
"PACKAGE_NAME",
"XGETTEXT",
"COPYRIGHT_HOLDERS_SUBSTITUTION"
] |
[]
|
["COPYRIGHT_HOLDERS", "PACKAGE_NAME", "XGETTEXT", "COPYRIGHT_HOLDERS_SUBSTITUTION"]
|
python
| 4 | 0 | |
cloudatcost_ansible_module/cac_server.py
|
#!/usr/bin/python
# Custom Module to manage server instances in a CloudAtCost
# (https://cloudatcost.com) Cloud
from collections import namedtuple, defaultdict, MutableMapping
import string
from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: cac_server
author: "Patrick Toal (@sage905)"
short_description: Create, Delete, Start, Stop, Restart or Update an instance at CloudAtCost
description: >
Manage servers at CloudAtCost via the API:
U(https://github.com/cloudatcost/api)
An API user and key must be acquired per the Instructions in the API docs.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted']
default: present
api_key:
description:
- CloudAtCost API key
default: null
api_user:
description:
- CloudAtCost API Username
default: null
label:
aliases: name
description:
- Label to give the instance (alphanumeric only. No spaces, dashes, underscores)
default: null
type: string
fqdn:
description:
- Fully Qualified Domain-Name for Reverse DNS setup
default: null
type: string
server_id:
description:
- Unique ID of a CloudAtCost server (optional)
aliases: sid
default: null
type: integer
cpus:
description:
- Number of vCPUs (1-16) to allocate to this instance
default: 1
type: integer
choices: [1-16]
ram:
description:
- Amount of RAM to allocate to this instance (MB)
default: 1024
type: integer
choices: [512, 1024, 2048, 3072, 4096, 6144, 7168, 8192, 12288, 16384, 20480, 24576, 28672, 32768]
storage:
description:
- Amount of Disk Storage to allocate to this instance (GB)
default: 10
choices: [10 - 1000]
template_id:
description:
- Operating System to use for the instance (must be a #id or text description from /v1/listtemplates.php)
# Default of 26 is for CentOS 7 x64
default: 26
type: integer
runmode:
description:
- The `safe` runmode will automatically shut down the server after 7 days. `normal` will not auto-shutdown the server
default: safe
type: string
choices: ["safe", "normal"]
wait:
description:
- wait for the instance to be powered on before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
# 15min default. When CloudAtCost is having problems, server provisioning can take DAYS.
default: 900
requirements:
- "python >= 2.6"
- "cacpy >= 0.5.3"
- "pycurl"
notes:
- If I(server_id) is specified, it must match an existing server id.
- If I(server_id) is not specified, the first server in the account that
exactly matches I(label) is used.
- If C(state=present), the server will either be created or updated.
- Only the following attributes can be updated after creation:
- I(label) (Must provide server_id to change)
- I(fqdn)
- I(run_mode)
- I(state)
- If C(state in ('absent', 'deleted')), the server will be destroyed! Use
with caution.
- CAC_API_KEY and CAC_API_USER environment variables can be used instead
of I(api_key) and I(api_user)
'''
EXAMPLES = '''
---
# Create a server
- local_action:
module: cac_server
api_user: [email protected]
api_key: 'longStringFromCACApi'
label: cloudatcost-test1
cpus: 1
ram: 1024
storage: 10
template_id: 26
runmode: safe
wait: yes
wait_timeout: 3600
state: present
# Ensure a running server (create if missing)
- local_action:
module: cac_server
api_user: [email protected]
api_key: 'longStringFromCaCAPI'
label: cloudatcost-test1
cpus: 1
ram: 1024
storage: 10
template_id: 26
runmode: safe
wait: yes
wait_timeout: 3600
state: present
# Delete a server
- local_action:
module: cac_server
api_user: [email protected]
api_key: 'longStringFromCaCAPI'
sid: 12345678
label: cloudatcost-test1
state: absent
# Stop a server
- local_action:
module: cac_server
api_user: [email protected]
api_key: 'longStringFromCaCAPI'
label: cloudatcost-test1
state: stopped
# Reboot a server
- local_action:
module: cac_server
api_user: [email protected]
api_key: 'longStringFromCaCAPI'
label: cloudatcost-test1
state: restarted
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
try:
import pycurl
HAS_PYCURL = True
except ImportError:
HAS_PYCURL = False
try:
from cacpy import CACPy
HAS_CAC = True
except ImportError:
HAS_CAC = False
class CacApiError(Exception):
"""
Raised when something went wrong during a call to CloudAtCost's API
"""
def get_server(api, server_id=None, label=None, server_name=None):
"""
Use the CAC API to search for the provided server_id, servername, or label
and return the first match found as a CACServer instance.
Returns None if no server found.
"""
assert server_id is not None or label is not None or server_name is not None
try:
server = next(
server for server in api.get_server_info().get('data') if
server['sid'] == str(server_id) or server['servername'] == server_name or server['label'] == label)
except StopIteration:
return None
return CACServer(api, server)
def check_ok(response):
""" Verify that the API Call has an 'ok' status. """
if response['status'] != 'ok':
raise CacApiError('CloudAtCost API call failed. Status: %s' % response)
return True
class CACTemplate(namedtuple('CACTemplate', ['desc', 'template_id'])):
""" Represent a CloudAtCost OS Template """
# Cache templates as they aren't likely to change during execution.
templates = {}
@classmethod
def get_template(cls, api, lookup=None):
"""Return a CACTemplate after querying the Cloudatcost API for a list of templates for a match.
Required Arguments:
lookup - Description or id to be matched
Raises:
LookupError if desc or template_id can't be found
ValueError if no lookup parameters are provided
"""
assert lookup is not None, "Must provide an id or description to lookup."
if isinstance(lookup, cls):
lookup = lookup.template_id
if isinstance(lookup, int):
lookup = str(lookup)
if not cls.templates:
cls.templates = api.get_template_info()['data']
try:
template = next(t for t in cls.templates
if t.get('ce_id') == lookup or t.get('name') == lookup)
except StopIteration:
raise LookupError("Template with ID or description: " + lookup + " was not found")
return cls(template.get('name'), template.get('ce_id'))
def _poller(poll_func, waittime=300, interval=1):
for t in range(1, waittime, interval):
time.sleep(interval)
result = poll_func()
if result:
return result
class CACServer(MutableMapping):
"""Represent a server instance at cloudatost. Perform checking and validation
on attribute modification, to ensure valid state transitions before committing
to CloudAtCost API.
"""
def _set_label(self, value):
check_ok(self.api.rename_server(new_name=value, server_id=self._current_state['sid']))
def _set_rdns(self, value):
check_ok(self.api.change_hostname(new_hostname=value, server_id=self._current_state['sid']))
def _set_mode(self, value):
check_ok(self.api.set_run_mode(run_mode=value, server_id=self._current_state['sid']))
def _set_status(self, value):
if value in ('Powered On', 'on'):
check_ok(self.api.power_on_server(server_id=self._current_state['sid']))
elif value in ('Powered Off', 'off'):
check_ok(self.api.power_off_server(server_id=self._current_state['sid']))
elif value in ('Restarted', 'restart'):
check_ok(self.api.reset_server(server_id=self._current_state['sid']))
elif value in ('Deleted', 'delete'):
check_ok(self.api.server_delete(server_id=self['sid']))
_modify_functions = {'label': _set_label, 'rdns': _set_rdns, 'status': _set_status, 'mode': _set_mode}
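# Typical flow (illustrative): fetch a server with get_server(), mutate the
# mapping (e.g. server['label'] = 'web2'), then call commit() to push the
# queued changes through the CloudAtCost API.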
def __init__(self, api, server):
self.api = api
self._current_state = dict(server)
self._changed_attrs = dict()
if server['template'] is not None:
self._current_state['template'] = CACTemplate.get_template(api, server['template'])
def __delitem__(self, key):
self._changed_attrs.__delitem__(key)
def __len__(self):
return len(self.__getstate__())
def __iter__(self):
return self.__getstate__().__iter__()
def __getitem__(self, item):
"""
Get the modified value of the server attribute, or the existing value if not modified.
:raises AttributeError if attribute not found
"""
return self._changed_attrs[item] if item in self._changed_attrs else self._current_state[item]
def __setitem__(self, key, value):
if key in self.__class__._modify_functions:
if self._current_state[key] != value:
self._changed_attrs[key] = value
else:
raise KeyError(self.__class__.__name__ + " does not have a modifiable item: " + key)
def __repr__(self):
return ('{cls.__name__}(api_account={self.api.email}, sid={self[sid]}, '
'label={label})').format(
cls=type(self), self=self, label=self.get('label'))
def __getstate__(self):
return self._current_state.copy()
def check(self):
return bool(self._changed_attrs)
def commit(self):
# Only commit existing records.
if self['sid'] is None:
raise AttributeError("Server commit failed. sid property not set on CACServer object.")
if get_server(self.api, server_id=self['sid']) is None:
raise LookupError("Unable to find server with sid: " + str(self['sid']))
if len(self._changed_attrs) > 0:
for (item, value) in list(self._changed_attrs.items()):
self._modify_functions[item](self, value)
return get_server(self.api, server_id=self['sid'])
else:
return self
@staticmethod
def check_server_status(api, servername, status):
def f():
server = get_server(api, server_name=servername)
if server and server['status'] == status:
return server
return f
@staticmethod
def build_server(api, cpu, ram, disk, template, label, wait=False, wait_timeout=300):
"""
Build a server with the provided parameters
:param label: Name to give the server for the Panel
:param api: CACPy instance
:param cpu: # of vCPU's to allocate
:param ram: RAM to allocate (MB)
:param disk: Disk to allocate (GB)
:param template: OS Template to use (id, or string)
:param wait: Wait for server build to complete
:param wait_timeout: Seconds to wait for build to complete
:return: ( request.Response, CACServer ) response from CAC server, CACServer object if build completed
:raises CacApiError on any error
"""
_required_build_params = ('cpu', 'ram', 'disk', 'template', 'label')
assert isinstance(api, CACPy)
missing = [param for param in _required_build_params if not locals()[param]]
if missing:
raise AttributeError("Server Build missing arguments: " + " ".join(missing))
assert ram in [512, 1024, 2048, 3072, 4096, 6144, 7168, 8192, 12288, 16384, 20480, 24576, 28672, 32768]
assert cpu in range(1, 16)
os_template = CACTemplate.get_template(api, template)
# The CloudAtCost Build timing can be unpredictable, but optimally works like:
# 1. call API to build.
# 2. JSON response includes result: successful, a taskid (which is useless since the listtasks API
# doesn't work), and a servername.
# 3. Once the task is queued, a new server will show up in the listservers call with the servername and a
# status of "Installing". This could take minutes, hours, or days.
# 4. The server will be built, and at that time, the status will change to "Powered On". The server then takes
# some time to boot.
# Queue the build
server = None
response = api.server_build(cpu, ram, disk, os_template.template_id)
if response.get('result') == 'successful':
# Optionally wait for the server to be Powered On. Poll every 10s.
if wait:
server = _poller(CACServer.check_server_status(api, response.get('servername'), 'Powered On'),
wait_timeout, 10)
# Set the label, so we can find it again in the future
if server:
server['label'] = label
server.commit()
return server, response
else:
raise CacApiError(string.Formatter().vformat("Server Build Failed. Status: {status} "
"#{error}, \"{error_description}\" ",
(), defaultdict(str, **response)))
def get_api(api_user, api_key):
try:
if not api_key:
api_key = os.environ['CAC_API_KEY']
if not api_user:
api_user = os.environ['CAC_API_USER']
except KeyError:
raise CacApiError("Unable to get %s for CloudAtCost connection" % (
"api key from parameter or CAC_API_KEY environment variable" if not api_key else
"api user from parameter or CAC_API_USER environment variable"))
api = CACPy(api_user, api_key)
check_ok(api.get_resources())
return api
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present',
choices=['active', 'present', 'started',
'deleted', 'absent', 'stopped',
'restarted']),
api_key=dict(type='str'),
api_user=dict(type='str'),
label=dict(type='str', aliases=['name']),
fqdn=dict(type='str'),
cpus=dict(type='int'),
ram=dict(type='int'),
storage=dict(type='int'),
template=dict(),
runmode=dict(type='str'),
server_id=dict(type='int', aliases=['sid']),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
),
supports_check_mode=True
)
if not HAS_PYCURL:
module.fail_json(msg='pycurl required for this module')
if not HAS_CAC:
module.fail_json(msg='CACPy required for this module')
changed = False
response = None
state = module.params.get('state')
label = module.params.get('label')
rdns = module.params.get('fqdn')
cpus = module.params.get('cpus')
runmode = module.params.get('runmode')
ram = module.params.get('ram')
storage = module.params.get('storage')
template = module.params.get('template')
server_id = module.params.get('server_id')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
try:
api = get_api(module.params.get('api_user'), module.params.get('api_key'))
server = get_server(api, server_id=server_id, label=label)
if state in ('absent', 'deleted'):
if server:
server['status'] = "Deleted"
else:
module.exit_json(changed=False, server=None, response=None)
else:
# For any other state, we need a server object.
if not server:
if module.check_mode:
changed = True
else:
server, response = CACServer.build_server(api, cpus, ram, storage, template, label, wait,
wait_timeout)
if response['result'] == "successful":
changed = True
else:
module.fail_json(
msg="Build initiated but no server was returned. Check CloudAtCost Panel. You "
"will need to manually set the server label in the panel before trying again."
"Response: %s" % response)
if not server:
# We didn't wait for it to build, or it timed out
module.exit_json(changed=True, server=None, response=response)
if state in ('present', 'active', 'started'):
server['status'] = 'Powered On'
elif state == 'stopped':
server['status'] = 'Powered Off'
elif state == 'restarted':
server['status'] = 'Restarted'
if label:
server['label'] = label
if rdns:
server['rdns'] = rdns
if runmode:
# runmode reports as "Normal" or "Safe", but the api only accepts "normal", or "safe"
if server['mode'].lower() != runmode.lower():
server['mode'] = runmode.lower()
if module.check_mode:
changed = server.check()
server = None
else:
updated = server.commit()
if updated != server:
changed = True
server = updated
module.exit_json(changed=changed, server=server, response=response)
except Exception as e:
module.fail_json(msg='%s' % e.message)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CAC_API_KEY",
"CAC_API_USER"
] |
[]
|
["CAC_API_KEY", "CAC_API_USER"]
|
python
| 2 | 0 | |
students/k33422/Alexandrin_Anton/practical_works/django_project_alexandrin/django_project_alexandrin/asgi.py
|
"""
ASGI config for django_project_alexandrin project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_alexandrin.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
obs-tn/generate_obs-tn_pdf.py
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2019 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <[email protected]>
"""
This script generates the HTML and PDF OBS tN document
"""
from __future__ import unicode_literals, print_function
import os
import sys
import re
import logging
import argparse
import tempfile
import markdown2
import shutil
import subprocess
import json
import git
from glob import glob
from bs4 import BeautifulSoup
from ..general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
_print = print
DEFAULT_LANG = 'en'
DEFAULT_OWNER = 'unfoldingWord'
DEFAULT_TAG = 'master'
OWNERS = [DEFAULT_OWNER, 'STR', 'Door43-Catalog']
LANGUAGE_FILES = {
'fr': 'French-fr_FR.json',
'en': 'English-en_US.json'
}
def print(obj):
_print(json.dumps(obj, ensure_ascii=False, indent=2).encode('utf-8'))
class ObsTnConverter(object):
def __init__(self, obs_tn_tag=None, obs_tag=None, tw_tag=None, ta_tag=None, working_dir=None, output_dir=None,
lang_code=DEFAULT_LANG, owner=DEFAULT_OWNER, regenerate=False, logger=None):
self.obs_tn_tag = obs_tn_tag
self.obs_tag = obs_tag
self.tw_tag = tw_tag
self.ta_tag = ta_tag
self.working_dir = working_dir
self.output_dir = output_dir
self.lang_code = lang_code
self.owner = owner
self.regenerate = regenerate
self.logger = logger
if not self.working_dir:
self.working_dir = tempfile.mkdtemp(prefix='obs-tn-')
if not self.output_dir:
self.output_dir = self.working_dir
self.logger.info('WORKING DIR IS {0} FOR {1}'.format(self.working_dir, self.lang_code))
self.obs_tn_dir = os.path.join(self.working_dir, '{0}_obs-tn'.format(lang_code))
self.obs_dir = os.path.join(self.working_dir, '{0}_obs'.format(lang_code))
self.tw_dir = os.path.join(self.working_dir, '{0}_tw'.format(lang_code))
self.ta_dir = os.path.join(self.working_dir, '{0}_ta'.format(lang_code))
self.html_dir = os.path.join(self.output_dir, 'html')
if not os.path.isdir(self.html_dir):
os.makedirs(self.html_dir)
self.manifest = None
self.tw_manifest = None
self.ta_manifest = None
self.obs_tn_text = ''
self.tw_text = ''
self.ta_text = ''
self.tw_cat = {}
self.bad_links = {}
self.bad_notes = {}
self.resource_data = {}
self.rc_references = {}
self.version = None
self.publisher = None
self.contributors = None
self.issued = None
self.file_id = None
self.my_path = os.path.dirname(os.path.realpath(__file__))
self.generation_info = {}
self.title = 'unfoldingWord® Open Bible Stories Translation Notes'
self.tw_title = 'Translation Words'
self.ta_title = 'Translation Academy'
self.translations = {}
def translate(self, key):
if not self.translations:
if self.lang_code not in LANGUAGE_FILES:
self.logger.error('No locale file for {0}.'.format(self.lang_code))
exit(1)
locale_file = os.path.join(self.my_path, '..', 'locale', LANGUAGE_FILES[self.lang_code])
if not os.path.isfile(locale_file):
self.logger.error('No locale file found at {0} for {1}.'.format(locale_file, self.lang_code))
exit(1)
self.translations = load_json_object(locale_file)
keys = key.split('.')
t = self.translations
for key in keys:
t = t.get(key, None)
if t is None:
# handle the case where self.translations doesn't have that (sub)key
print("No translation for `{0}`".format(key))
break
return t
def run(self):
# self.load_resource_data()
self.setup_resource_files()
self.file_id = '{0}_obs-tn_{1}_{2}'.format(self.lang_code, self.obs_tn_tag, self.generation_info['obs-tn']['commit'])
self.determine_if_regeneration_needed()
self.manifest = load_yaml_object(os.path.join(self.obs_tn_dir, 'manifest.yaml'))
self.tw_manifest = load_yaml_object(os.path.join(self.tw_dir, 'manifest.yaml'))
self.ta_manifest = load_yaml_object(os.path.join(self.ta_dir, 'manifest.yaml'))
self.version = self.manifest['dublin_core']['version']
self.title = self.manifest['dublin_core']['title']
if 'subject' in self.tw_manifest['dublin_core']:
self.tw_title = self.tw_manifest['dublin_core']['subject']
if 'subject' in self.ta_manifest['dublin_core']:
self.ta_title = self.ta_manifest['dublin_core']['subject']
self.contributors = '<br/>'.join(self.manifest['dublin_core']['contributor'])
self.publisher = self.manifest['dublin_core']['publisher']
self.issued = self.manifest['dublin_core']['issued']
self.file_id = self.file_id
self.load_tw_cat()
self.logger.info('Creating OBS tN HTML files for {0}...'.format(self.file_id))
if self.regenerate or not os.path.exists(os.path.join(self.output_dir, '{0}.html'.format(self.file_id))):
self.generate_obs_tn_content()
self.logger.info('Generating Body HTML for {0}...'.format(self.file_id))
self.generate_body_html()
self.logger.info('Generating Cover HTML for {0}...'.format(self.file_id))
self.generate_cover_html()
self.logger.info('Generating License HTML for {0}...'.format(self.file_id))
self.generate_license_html()
self.logger.info('Copying style sheet file for {0}...'.format(self.file_id))
style_file = os.path.join(self.my_path, 'obs-tn_style.css')
shutil.copy2(style_file, self.html_dir)
self.save_resource_data()
self.save_bad_links()
self.save_bad_notes()
self.logger.info('Generating PDF {0}/{1}.pdf...'.format(self.output_dir, self.file_id))
self.generate_obs_tn_pdf()
self.logger.info('PDF file can be found at {0}/{1}.pdf'.format(self.output_dir, self.file_id))
def save_bad_links(self):
bad_links = "BAD LINKS:\n"
for source_rc in sorted(self.bad_links.keys()):
for rc in sorted(self.bad_links[source_rc].keys()):
source = source_rc[5:].split('/')
parts = rc[5:].split('/')
if source[1] == 'obs-tn':
if parts[1] == 'tw':
str = ' tW'
else:
str = ' tN'
str += ' {0} {1}:{2}'.format(source[3].upper(), source[4], source[5])
else:
str = ' {0}'.format(source_rc)
str += ': BAD RC - `{0}`'.format(rc)
if self.bad_links[source_rc][rc]:
str += ' - change to `{0}`'.format(self.bad_links[source_rc][rc])
bad_links += "{0}\n".format(str)
save_file = os.path.join(self.output_dir, '{0}_bad_links.txt'.format(self.file_id))
write_file(save_file, bad_links)
self.logger.info('BAD LINKS file can be found at {0}'.format(save_file))
def save_bad_notes(self):
bad_notes = '<!DOCTYPE html><html lang="en-US"><head data-suburl=""><title>NON-MATCHING NOTES</title><meta charset="utf-8"></head><body><p>NON-MATCHING NOTES (i.e. not found in the frame text as written):</p><ul>'
for cf in sorted(self.bad_notes.keys()):
bad_notes += '<li><a href="{0}_html/{0}.html#obs-tn-{1}" title="See in the OBS tN Docs (HTML)" target="obs-tn-html">{1}</a><a href="https://git.door43.org/{6}/{2}_obs-tn/src/branch/{7}/content/{3}/{4}.md" style="text-decoration:none" target="obs-tn-git"><img src="http://www.myiconfinder.com/uploads/iconsets/16-16-65222a067a7152473c9cc51c05b85695-note.png" title="See OBS UTN note on DCS"></a><a href="https://git.door43.org/{6}/{2}_obs/src/branch/master/content/{3}.md" style="text-decoration:none" target="obs-git"><img src="https://cdn3.iconfinder.com/data/icons/linecons-free-vector-icons-pack/32/photo-16.png" title="See OBS story on DCS"></a>:<br/><i>{5}</i><br/><ul>'.format(
self.file_id, cf, self.lang_code, cf.split('-')[0], cf.split('-')[1], self.bad_notes[cf]['text'], self.owner, DEFAULT_TAG)
for note in self.bad_notes[cf]['notes']:
for key in note.keys():
if note[key]:
bad_notes += '<li><b><i>{0}</i></b><br/>{1} (QUOTE ISSUE)</li>'.format(key, note[key])
else:
bad_notes += '<li><b><i>{0}</i></b></li>'.format(key)
bad_notes += '</ul></li>'
bad_notes += "</ul></body></html>"
save_file = os.path.join(self.output_dir, '{0}_bad_notes.html'.format(self.file_id))
write_file(save_file, bad_notes)
self.logger.info('BAD NOTES file can be found at {0}'.format(save_file))
@staticmethod
def get_resource_git_url(resource, lang, owner):
return 'https://git.door43.org/{0}/{1}_{2}.git'.format(owner, lang, resource)
def clone_resource(self, resource, tag=DEFAULT_TAG, url=None):
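"""Clone the resource repo into the working dir if it is not already present,
falling back through the known owners and languages when the preferred URL
fails, then fetch and check out `tag` (pulling when tag is the default)."""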
if not url:
url = self.get_resource_git_url(resource, self.lang_code, self.owner)
repo_dir = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource))
if not os.path.isdir(repo_dir):
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
owners = OWNERS
owners.insert(0, self.owner)
languages = [self.lang_code, DEFAULT_LANG]
if not os.path.isdir(repo_dir):
for lang in languages:
for owner in owners:
url = self.get_resource_git_url(resource, lang, owner)
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
continue
break
if os.path.isdir(repo_dir):
break
g = git.Git(repo_dir)
g.fetch()
g.checkout(tag)
if tag == DEFAULT_TAG:
g.pull()
commit = g.rev_parse('HEAD', short=10)
self.generation_info[resource] = {'tag': tag, 'commit': commit}
def setup_resource_files(self):
self.clone_resource('obs-tn', self.obs_tn_tag)
self.clone_resource('obs', self.obs_tag)
self.clone_resource('tw', self.tw_tag)
self.clone_resource('ta', self.ta_tag)
if not os.path.isfile(os.path.join(self.html_dir, 'logo-obs-tn.png')):
command = 'curl -o {0}/logo-obs-tn.png https://cdn.door43.org/assets/uw-icons/logo-obs-256.png'.format(
self.html_dir)
subprocess.call(command, shell=True)
def load_tw_cat(self):
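"""Download tw_cat.json if needed and build a chapter/frame lookup of
translationWords rc:// links; renamed terms are remapped via `mapping`, and
unresolvable terms are recorded in self.bad_links."""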
mapping = {
'idol': 'falsegod',
'witness': 'testimony',
'newcovenant': 'covenant',
'taxcollector': 'tax',
'believer': 'believe'
}
tw_cat_file = os.path.join(self.working_dir, 'tw_cat.json')
if not os.path.isfile(tw_cat_file):
command = 'curl -o {0} https://cdn.door43.org/v2/ts/obs/en/tw_cat.json'.format(
tw_cat_file)
subprocess.call(command, shell=True)
tw_cat = load_json_object(tw_cat_file)
for chapter in tw_cat['chapters']:
self.tw_cat[chapter['id']] = {}
for frame in chapter['frames']:
self.tw_cat[chapter['id']][frame['id']] = []
for item in frame['items']:
term = item['id']
category = None
for c in ['kt', 'names', 'other']:
if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(term))):
category = c
break
if not category and term in mapping:
category = None
for c in ['kt', 'names', 'other']:
if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(mapping[term]))):
category = c
term = mapping[term]
break
if category:
self.tw_cat[chapter['id']][frame['id']].append('rc://{0}/tw/dict/bible/{1}/{2}'.format(
self.lang_code, category, term))
if not category or term != item['id']:
fix = None
if term != item['id']:
fix = term
source_rc = 'tw_cat.json {0}/{1}'.format(chapter['id'], frame['id'])
if source_rc not in self.bad_links:
self.bad_links[source_rc] = {}
self.bad_links[source_rc][item['id']] = fix
def determine_if_regeneration_needed(self):
# check if any commit hashes have changed
old_info = self.get_previous_generation_info()
if not old_info:
self.logger.info('Looks like this is a new commit of {0}. Generating PDF.'.format(self.file_id))
self.regenerate = True
else:
for resource in self.generation_info:
if resource in old_info and resource in self.generation_info \
and (old_info[resource]['tag'] != self.generation_info[resource]['tag']
or old_info[resource]['commit'] != self.generation_info[resource]['commit']):
self.logger.info('Resource {0} has changed: {1} => {2}, {3} => {4}. REGENERATING PDF.'.format(
resource, old_info[resource]['tag'], self.generation_info[resource]['tag'],
old_info[resource]['commit'], self.generation_info[resource]['commit']
))
self.regenerate = True
def get_contributors_html(self):
if self.contributors and len(self.contributors):
return '''
<div id="contributors" class="article">
<h1 class="section-header">{0}</h1>
<p>
{1}
</p>
</div>
'''.format(self.translate('contributors'), self.contributors)
else:
return ''
def save_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
write_file(save_file, self.resource_data)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
write_file(save_file, self.rc_references)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
write_file(save_file, self.bad_links)
save_file = os.path.join(save_dir, '{0}_bad_notes.json'.format(self.file_id))
write_file(save_file, self.bad_notes)
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
write_file(save_file, self.generation_info)
def get_previous_generation_info(self):
save_dir = os.path.join(self.output_dir, 'save')
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
if os.path.isfile(save_file):
return load_json_object(save_file)
else:
return {}
def load_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
if os.path.isfile(save_file):
self.resource_data = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
if os.path.isfile(save_file):
self.rc_references = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
if os.path.isfile(save_file):
self.bad_links = load_json_object(save_file)
def generate_body_html(self):
obs_tn_html = self.obs_tn_text
ta_html = self.get_ta_html()
tw_html = self.get_tw_html()
contributors_html = self.get_contributors_html()
html = '\n'.join([obs_tn_html, tw_html, ta_html, contributors_html])
html = self.replace_rc_links(html)
html = self.fix_links(html)
html = '''<!DOCTYPE html>
<html lang="en-US">
<head data-suburl="">
<meta charset="utf-8"/>
<title>{0} - v{1}</title>
</head>
<body>
{2}
</body>
</html>
'''.format(self.title, self.version, html)
soup = BeautifulSoup(html, 'html.parser')
# Make all headers that have a header right before them non-break
for h in soup.find_all(['h2', 'h3', 'h4', 'h5', 'h6']):
prev = h.find_previous_sibling()
if prev and re.match('^h[2-6]$', prev.name):
h['class'] = h.get('class', []) + ['no-break']
# Make all headers within the page content just be span tags with h# classes
for h in soup.find_all(['h3', 'h4', 'h5', 'h6']):
if not h.get('class') or 'section-header' not in h['class']:
h['class'] = h.get('class', []) + [h.name]
h.name = 'span'
soup.head.append(soup.new_tag('link', href="html/obs-tn_style.css", rel="stylesheet"))
html_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
write_file(html_file, unicode(soup))
self.logger.info('Wrote HTML to {0}'.format(html_file))
def generate_cover_html(self):
cover_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<link href="obs-tn_style.css" rel="stylesheet"/>
</head>
<body>
<div style="text-align:center;padding-top:200px" class="break" id="cover">
<img src="logo-obs-tn.png" width="120">
<span class="h1">{0}</span>
<span class="h3">{1} {2}</span>
</div>
</body>
</html>
'''.format(self.title, self.translate('license.version'), self.version)
html_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
write_file(html_file, cover_html)
def generate_license_html(self):
license_file = os.path.join(self.obs_tn_dir, 'LICENSE.md')
license = markdown2.markdown_path(license_file)
license_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<link href="obs-tn_style.css" rel="stylesheet"/>
</head>
<body>
<div class="break">
<span class="h1">{4}</span>
<p>
<strong>{5}:</strong> {0}<br/>
<strong>{6}:</strong> {1}<br/>
<strong>{7}:</strong> {2}<br/>
</p>
{3}
</div>
</body>
</html>
'''.format(self.issued, self.version, self.publisher, license,
self.translate('license.copyrights_and_licensing'),
self.translate('license.date'),
self.translate('license.version'),
self.translate('license.published_by'))
html_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
write_file(html_file, license_html)
def generate_obs_tn_pdf(self):
cover_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
license_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
header_file = os.path.join(self.my_path, 'obs-tn_header.html')
footer_file = os.path.join(self.my_path, 'obs-tn_footer.html')
body_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
output_file = os.path.join(self.output_dir, '{0}.pdf'.format(self.file_id))
template_file = os.path.join(self.my_path, '{0}_toc_template.xsl'.format(self.lang_code))
command = '''wkhtmltopdf
--javascript-delay 2000
--debug-javascript
--cache-dir "{6}"
--run-script "setInterval(function(){{if(document.readyState=='complete') setTimeout(function() {{window.status='done';}}, 100);}},200)"
--encoding utf-8
--outline-depth 3
-O portrait
-L 15 -R 15 -T 15 -B 15
--header-html "{0}"
--header-spacing 2
--footer-html "{7}"
cover "{1}"
cover "{2}"
toc
--disable-dotted-lines
--enable-external-links
--xsl-style-sheet "{3}"
--toc-header-text "{8}"
"{4}"
"{5}"
'''.format(header_file, cover_file, license_file, template_file, body_file, output_file,
os.path.join(self.working_dir, 'wkhtmltopdf'), footer_file,
self.translate('table_of_contents'))
command = re.sub(r'\s+', ' ', command, flags=re.MULTILINE)
self.logger.info(command)
subprocess.call(command, shell=True)
@staticmethod
def highlight_text(text, note):
parts = re.split(r"\s*…\s*|\s*\.\.\.\s*", note)
processed_text = ''
to_process_text = text
for idx, part in enumerate(parts):
split_pattern = re.escape(part)
if '<span' in text:
split_pattern = '({0})'.format(re.sub('(\\\\ )+', '(\s+|(\s*</*span[^>]*>\s*)+)', split_pattern))
else:
split_pattern = '({0})'.format(split_pattern)
splits = re.split(split_pattern, to_process_text, 1)
processed_text += splits[0]
if len(splits) > 1:
processed_text += '<span class="highlight{0}">{1}</span>'.format(' split' if len(parts) > 1 else '',
splits[1])
if len(splits) > 2:
to_process_text = splits[-1]
if to_process_text:
processed_text += to_process_text
return processed_text
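# Illustrative example (assumed inputs, not from the source):
#   highlight_text('Jesus went up to Jerusalem', 'went ... Jerusalem')
#   returns 'Jesus <span class="highlight split">went</span> up to <span class="highlight split">Jerusalem</span>'
# Each ellipsis-separated part of the note is wrapped in its own highlight span.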
def highlight_text_with_frame(self, orig_text, frame_html, cf):
ignore = ['A Bible story from', 'Connecting Statement', 'Connecting Statement:',
'General Information', 'General Note', 'Information générale',
'Termes Importants', 'Une histoire biblique tirée de', 'Une histoire de la Bible tirée de',
'Une histoire de la Bible à partir', 'Une histoire de la Bible à partir de',
'Mots de Traduction', 'Nota geral', 'Déclaration de connexion', 'Cette histoire biblique est tirée',
'Une histoire biblique tirée de:', 'Informations générales', 'Information Générale']
highlighted_text = orig_text
phrases = []
soup = BeautifulSoup(frame_html, 'html.parser')
headers = soup.find_all('h4')
for header in headers:
phrases.append(header.text)
phrases.sort(key=len, reverse=True)
for phrase in phrases:
new_highlighted_text = self.highlight_text(highlighted_text, phrase)
if new_highlighted_text != highlighted_text:
highlighted_text = new_highlighted_text
elif phrase not in ignore:
if cf not in self.bad_notes:
self.bad_notes[cf] = {
'text': orig_text,
'notes': []
}
bad_note = {phrase: None}
alt_notes = [
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('“', '"').replace('”', '"'),
phrase.replace('"', '”').replace('”', '“', 1),
phrase.replace("'", '’').replace('’', '‘', 1),
phrase.replace("'", '’'),
phrase.replace('’', "'"),
phrase.replace('‘', "'")]
for alt_note in alt_notes:
if orig_text != self.highlight_text(orig_text, alt_note):
bad_note[phrase] = alt_note
break
self.bad_notes[cf]['notes'].append(bad_note)
return highlighted_text
def generate_obs_tn_content(self):
content = '''
<div id="obs-tn" class="resource-title-page">
<h1 class="section-header">{0}</h1>
</div>
'''.format(self.title.replace('unfoldingWord® ', ''))
chapter_dirs = sorted(glob(os.path.join(self.obs_tn_dir, 'content', '*')))
for chapter_dir in chapter_dirs:
if os.path.isdir(chapter_dir):
chapter = os.path.basename(chapter_dir)
soup = BeautifulSoup(
markdown2.markdown_path(os.path.join(self.obs_dir, 'content', '{0}.md'.format(chapter))),
'html.parser')
title = soup.h1.text
paragraphs = soup.find_all('p')
frames = []
for idx, p in enumerate(paragraphs):  # odd-indexed paragraphs are the frame texts (even-indexed ones are the images)
if idx % 2:
frames.append(p.text)
content += '<div id="chapter-{0}" class="chapter break">\n\n'.format(chapter)
content += '<h2>{0}</h2>\n'.format(title)
frame_files = sorted(glob(os.path.join(chapter_dir, '*.md')))
for frame_file in frame_files:
frame = os.path.splitext(os.path.basename(frame_file))[0]
frame_idx = int(frame)
id = 'obs-tn-{0}-{1}'.format(chapter, frame)
content += '<div id="{0}" class="frame">\n'.format(id)
content += '<h3>{0}:{1}</h3>\n'.format(chapter, frame)
text = ''
if frame_idx > 0:
text = re.sub(r'[\n\s]+', ' ', frames[frame_idx - 1], flags=re.MULTILINE)
frame_html = markdown2.markdown_path(frame_file)
frame_html = frame_html.replace('h1>', 'h4>')
frame_html = frame_html.replace('h2>', 'h5>')
frame_html = frame_html.replace('h3>', 'h6>')
frame_html = re.sub(r'href="(\d+)/(\d+)"', r'href="#obs-tn-\1-\2"', frame_html)
if text:
text = self.highlight_text_with_frame(text, frame_html, '{0}:{1}'.format(chapter, frame))
if '/tw/' not in frame_html and chapter in self.tw_cat and frame in self.tw_cat[chapter]\
and len(self.tw_cat[chapter][frame]):
frame_html += "<h3>{0}</h3>\n<ul>".format(self.tw_title)
for rc in self.tw_cat[chapter][frame]:
frame_html += '<li>[[{0}]]</li>'.format(rc)
frame_html += '</ul>'
content += '<div id="{0}-text" class="frame-text">\n{1}\n</div>\n'.format(id, text)
content += frame_html
content += '</div>\n\n'
# HANDLE RC LINKS
rc = 'rc://{0}/obs-tn/help/{1}/{2}'.format(self.lang_code, chapter, frame)
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#' + id,
'title': title
}
self.get_resource_data_from_rc_links(frame_html, rc)
content += '</div>\n\n'
self.obs_tn_text = content
write_file(os.path.join(self.html_dir, '{0}_obs-tn_content.html'.format(self.file_id)),
BeautifulSoup(content, 'html.parser').prettify())
def get_tw_html(self):
tw_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/tw/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
html = self.resource_data[rc]['text']
html = self.increase_headers(html)
title = self.resource_data[rc]['title']
alt_title = self.resource_data[rc]['alt_title']
if alt_title:
html = '<h2 class="hidden">{0}11</h2><span class="h2 section-header">{1}</span>\n{2}'.\
format(alt_title, title, html)
else:
html = '<h2 class="section-header">{0}</h2>\n{1}'.format(title, html)
tw_html += '<div id="{0}" class="article">\n{1}\n{2}</div>\n\n'.format(self.resource_data[rc]['id'], html,
reference_text)
if tw_html:
tw_html = '<div id="tw" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.tw_title, tw_html)
return tw_html
def get_ta_html(self):
ta_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/ta/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
if self.resource_data[rc]['text']:
ta_html += '''
<div id="{0}" class="article">
<h2 class="section-header">{1}</h2>
<div class="top-box box">
<div class="ta-question">
{2}: <em>{3}</em>
</div>
</div>
{4}
{5}
</div>
'''.format(self.resource_data[rc]['id'], self.resource_data[rc]['title'],
self.translate('this_page_answers_the_question'),
self.resource_data[rc]['alt_title'],
self.increase_headers(self.resource_data[rc]['text']), self.get_reference_text(rc))
if ta_html:
ta_html = '<div id="ta" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.ta_title, ta_html)
return ta_html
def has_tn_references(self, rc):
if rc not in self.rc_references:
return False
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference:
return True
return False
def get_reference_text(self, rc):
if not self.has_tn_references(rc):
return ''
uses = ''
references = []
done = {}
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference and reference not in done:
parts = reference[5:].split('/')
id = 'obs-tn-{0}-{1}'.format(parts[3], parts[4])
text = '{0}:{1}'.format(parts[3], parts[4])
references.append('<a href="#{0}">{1}</a>'.format(id, text))
done[reference] = True
if len(references):
uses = '<p class="go-back">\n(<b>{0}:</b> {1})\n</p>\n'.format(self.translate('go_back_to'),
'; '.join(references))
return uses
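# Illustrative output (assumed English UI strings, not from the source): a tW/tA article
# referenced from note rc://en/obs-tn/help/01/02 gets a back-reference such as
# '<p class="go-back">(<b>Go back to:</b> <a href="#obs-tn-01-02">01:02</a>)</p>'.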
def get_resource_data_from_rc_links(self, text, source_rc):
if source_rc not in self.bad_links:
self.bad_links[source_rc] = {}
rcs = re.findall(r'rc://[A-Z0-9/_\*-]+', text, flags=re.IGNORECASE | re.MULTILINE)
for rc in rcs:
parts = rc[5:].split('/')
resource = parts[1]
path = '/'.join(parts[3:])
if resource not in ['ta', 'tw']:
continue
if rc not in self.rc_references:
self.rc_references[rc] = []
if source_rc not in self.rc_references[rc]:
self.rc_references[rc].append(source_rc)
title = ''
t = ''
anchor_id = '{0}-{1}'.format(resource, path.replace('/', '-'))
link = '#{0}'.format(anchor_id)
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}.md'.format(path))
if not os.path.isfile(file_path):
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}/01.md'.format(path))
fix = None
if not os.path.isfile(file_path):
if resource == 'tw':
for category in ['kt', 'other', 'names']:
path2 = re.sub(r'^bible/([^/]+)/', r'bible/{0}/'.format(category), path.lower())
fix = 'rc://{0}/tw/dict/{1}'.format(self.lang_code, path2)
anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
link = '#{0}'.format(anchor_id)
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}.md'.format(path2))
if os.path.isfile(file_path):
break
elif resource == 'ta':
bad_names = {
'figs-abstractnoun': 'translate/figs-abstractnouns'
}
if parts[3] in bad_names:
path2 = bad_names[parts[3]]
else:
path2 = path
fix = 'rc://{0}/ta/man/{1}'.format(self.lang_code, path2)
anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
link = '#{0}'.format(anchor_id)
file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
'{0}/01.md'.format(path2))
if os.path.isfile(file_path):
if fix:
self.bad_links[source_rc][rc] = fix
if not rc in self.resource_data:
t = markdown2.markdown_path(file_path)
alt_title = ''
if resource == 'ta':
title_file = os.path.join(os.path.dirname(file_path), 'title.md')
question_file = os.path.join(os.path.dirname(file_path), 'sub-title.md')
if os.path.isfile(title_file):
title = read_file(title_file)
else:
title = self.get_first_header(t)
t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
flags=re.IGNORECASE | re.MULTILINE) # removes the header
if os.path.isfile(question_file):
alt_title = read_file(question_file)
t = self.fix_ta_links(t, path.split('/')[0])
elif resource == 'tw':
title = self.get_first_header(t)
t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
flags=re.IGNORECASE | re.MULTILINE) # removes the header
if len(title) > 70:
alt_title = ','.join(title[:70].split(',')[:-1]) + ', ...'
t = re.sub(r'\n*\s*\(See [^\n]*\)\s*\n*', '\n\n', t,
flags=re.IGNORECASE | re.MULTILINE) # removes the See also line
t = self.fix_tw_links(t, path.split('/')[1])
self.resource_data[rc] = {
'rc': rc,
'link': link,
'id': anchor_id,
'title': title,
'alt_title': alt_title,
'text': t,
'references': [source_rc]
}
self.get_resource_data_from_rc_links(t, rc)
else:
if source_rc not in self.resource_data[rc]['references']:
self.resource_data[rc]['references'].append(source_rc)
else:
if rc not in self.bad_links[source_rc]:
self.bad_links[source_rc][rc] = None
rcs = re.findall(r'(?<=\()\.+/[^\)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
for rc in rcs:
fix = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
format(self.lang_code), rc, flags=re.IGNORECASE)
if fix != rc:
self.bad_links[source_rc][rc] = fix
else:
self.bad_links[source_rc][rc] = None
rcs = re.findall(r'(?<=\()\.[^ \)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
for rc in rcs:
fix = None
if '/kt/' in rc or '/names/' in rc or '/other/' in rc:
new_rc = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
format(self.lang_code), rc, flags=re.IGNORECASE)
if new_rc != rc:
fix = new_rc
self.bad_links[source_rc][rc] = fix
@staticmethod
def increase_headers(text, increase_depth=1):
if text:
for num in range(5, 0, -1):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num + increase_depth),
text, flags=re.MULTILINE)
return text
@staticmethod
def decrease_headers(text, minimum_header=1, decrease=1):
if text:
for num in range(minimum_header, minimum_header + 10):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num),
r'<h{0}>\1</h{0}>'.format(num - decrease if (num - decrease) <= 5 else 5), text,
flags=re.MULTILINE)
return text
@staticmethod
def get_first_header(text):
lines = text.split('\n')
if len(lines):
for line in lines:
if re.match(r'<h1>', line):
return re.sub(r'<h1>(.*?)</h1>', r'\1', line)
return lines[0]
return "NO TITLE"
def fix_tw_links(self, text, group):
text = re.sub(r'href="\.\./([^/)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/{1}/\1"'.
format(self.lang_code, group), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/\1"'.format(self.lang_code),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'(\(|\[\[)(\.\./)*(kt|names|other)/([^)]+?)(\.md)*(\)|\]\])(?!\[)',
r'[[rc://{0}/tw/dict/bible/\3/\4]]'.format(self.lang_code), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_ta_links(self, text, manual):
text = re.sub(r'href="\.\./([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./\.\./([^/"]+)/([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/\1/\2"'.
format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="([^# :/"]+)"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
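# Illustrative example (assumed input, not from the source): with lang_code='en' and
# manual='translate', href="../figs-metaphor/01.md" becomes
# href="rc://en/ta/man/translate/figs-metaphor", which replace_rc_links() can then
# turn into an internal anchor.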
def replace(self, m):
before = m.group(1)
rc = m.group(2)
after = m.group(3)
if rc not in self.resource_data:
return m.group()
info = self.resource_data[rc]
if (before == '[[' and after == ']]') or (before == '(' and after == ')') or before == ' ' \
or (before == '>' and after == '<'):
return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
if (before == '"' and after == '"') or (before == "'" and after == "'"):
return info['link']
self.logger.error("FOUND SOME MALFORMED RC LINKS: {0}".format(m.group()))
return m.group()
def replace_rc_links(self, text):
# Change rc://... rc links,
# Case 1: [[rc://en/tw/help/bible/kt/word]] => <a href="#tw-kt-word">God's Word</a>
# Case 2: rc://en/tw/help/bible/kt/word => <a href="#tw-kt-word">God's Word</a>
# Case 3: <a href="rc://en/tw/help/bible/kt/word">text</a> => <a href="#tw-kt-word">Text</a> (used in links that are already formed)
# Case 4: Link to a tA or tW article that is not referenced by any tN note - remove the link, keep the title
# Case 5: Link to a resource not gathered for this tN - remove the link, keep the title or raw rc text
def replace_rc(match):
left = match.group(1)
rc = match.group(2)
right = match.group(3)
title = match.group(4)
if rc in self.resource_data:
info = self.resource_data[rc]
if not self.has_tn_references(rc):
# Case 4
return info['title']
if (left and right and left == '[[' and right == ']]') \
or (not left and not right):
# Case 1 and Case 2
return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
else:
# Case 3
return (left if left else '') + info['link'] + (right if right else '')
else:
# Case 5
return title if title else rc
regex = re.compile(r'(\[\[|<a[^>]+href=")*(rc://[/A-Za-z0-9\*_-]+)(\]\]|"[^>]*>(.*?)</a>)*')
text = regex.sub(replace_rc, text)
return text
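# Illustrative example (hypothetical article title, not from the source): if
# rc://en/ta/man/translate/figs-metaphor was collected into resource_data and is
# referenced by at least one note, '[[rc://en/ta/man/translate/figs-metaphor]]'
# becomes '<a href="#ta-translate-figs-metaphor">Metaphor</a>'; unreferenced or
# unknown links collapse to their title (or are left as the raw rc text).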
@staticmethod
def fix_links(text):
# Change [[http.*]] to <a href="http\1">http\1</a>
text = re.sub(r'\[\[http([^\]]+)\]\]', r'<a href="http\1">http\1</a>', text, flags=re.IGNORECASE)
# convert URLs to links if not already
text = re.sub(r'([^">])((http|https|ftp)://[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])',
r'\1<a href="\2">\2</a>', text, flags=re.IGNORECASE)
# URLS wth just www at the start, no http
text = re.sub(r'([^\/])(www\.[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="http://\2">\2</a>',
text, flags=re.IGNORECASE)
# Removes leading 0s from verse references
text = re.sub(r' 0*(\d+):0*(\d+)(-*)0*(\d*)', r' \1:\2\3\4', text, flags=re.IGNORECASE | re.MULTILINE)
return text
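# Illustrative examples (assumed inputs, not from the source):
#   '[[https://door43.org]]'    -> '<a href="https://door43.org">https://door43.org</a>'
#   'see www.unfoldingword.org' -> 'see <a href="http://www.unfoldingword.org">www.unfoldingword.org</a>'
#   'OBS 01:02-03'              -> 'OBS 1:2-3' (leading zeros stripped from verse references)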
def main(obs_tn_tag, obs_tag, tw_tag, ta_tag, lang_codes, working_dir, output_dir, owner, regenerate):
if not obs_tag:
obs_tag = DEFAULT_TAG  # fall back to the default tag (the arg parser defines no 'obs_sn' attribute)
if not lang_codes:
lang_codes = [DEFAULT_LANG]
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if not working_dir and 'WORKING_DIR' in os.environ:
working_dir = os.environ['WORKING_DIR']
print('Using env var WORKING_DIR: {0}'.format(working_dir))
if not output_dir and 'OUTPUT_DIR' in os.environ:
output_dir = os.environ['OUTPUT_DIR']
print('Using env var OUTPUT_DIR: {0}'.format(output_dir))
for lang_code in lang_codes:
_print('Starting OBS TN Converter for {0}...'.format(lang_code))
obs_tn_converter = ObsTnConverter(obs_tn_tag, obs_tag, tw_tag, ta_tag, working_dir, output_dir, lang_code,
owner, regenerate, logger)
obs_tn_converter.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--lang', dest='lang_codes', required=False, help='Language Code(s)', action='append')
parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help='Working Directory')
parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help='Output Directory')
parser.add_argument('--owner', dest='owner', default=DEFAULT_OWNER, required=False, help='Owner')
parser.add_argument('--obs-tn-tag', dest='obs_tn', default=DEFAULT_TAG, required=False, help='OBS tN Tag')
parser.add_argument('--obs-tag', dest='obs', default=DEFAULT_TAG, required=False, help='OBS Tag')
parser.add_argument('--ta-tag', dest='ta', default=DEFAULT_TAG, required=False, help='tA Tag')
parser.add_argument('--tw-tag', dest='tw', default=DEFAULT_TAG, required=False, help='tW Tag')
parser.add_argument('-r', '--regenerate', dest='regenerate', action='store_true',
help='Regenerate PDF even if exists')
args = parser.parse_args(sys.argv[1:])
main(args.obs_tn, args.obs, args.tw, args.ta, args.lang_codes, args.working_dir, args.output_dir, args.owner,
args.regenerate)
|
[] |
[] |
[
"OUTPUT_DIR",
"WORKING_DIR"
] |
[]
|
["OUTPUT_DIR", "WORKING_DIR"]
|
python
| 2 | 0 | |
workflow/controller/operator.go
|
package controller
import (
"encoding/json"
"fmt"
"math"
"os"
"reflect"
"regexp"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/argoproj/pkg/humanize"
argokubeerr "github.com/argoproj/pkg/kube/errors"
"github.com/argoproj/pkg/strftime"
jsonpatch "github.com/evanphx/json-patch"
log "github.com/sirupsen/logrus"
"github.com/valyala/fasttemplate"
apiv1 "k8s.io/api/core/v1"
policyv1beta "k8s.io/api/policy/v1beta1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo/config"
"github.com/argoproj/argo/errors"
"github.com/argoproj/argo/pkg/apis/workflow"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
"github.com/argoproj/argo/util"
envutil "github.com/argoproj/argo/util/env"
errorsutil "github.com/argoproj/argo/util/errors"
"github.com/argoproj/argo/util/intstr"
"github.com/argoproj/argo/util/resource"
"github.com/argoproj/argo/util/retry"
"github.com/argoproj/argo/workflow/common"
controllercache "github.com/argoproj/argo/workflow/controller/cache"
"github.com/argoproj/argo/workflow/controller/estimation"
"github.com/argoproj/argo/workflow/controller/indexes"
"github.com/argoproj/argo/workflow/metrics"
"github.com/argoproj/argo/workflow/progress"
argosync "github.com/argoproj/argo/workflow/sync"
"github.com/argoproj/argo/workflow/templateresolution"
wfutil "github.com/argoproj/argo/workflow/util"
"github.com/argoproj/argo/workflow/validate"
)
// wfOperationCtx is the context for evaluation and operation of a single workflow
type wfOperationCtx struct {
// wf is the workflow object. It should not be used in execution logic; woc.execWf.Spec should be used instead
wf *wfv1.Workflow
// orig is the original workflow object for purposes of creating a patch
orig *wfv1.Workflow
// updated indicates whether or not the workflow object itself was updated
// and needs to be persisted back to kubernetes
updated bool
// log is a logrus logging context to correlate logs with a workflow
log *log.Entry
// controller reference to workflow controller
controller *WorkflowController
// estimator estimates workflow and node durations
estimator estimation.Estimator
// globalParams holds any parameters that are available to be referenced
// in the global scope (e.g. workflow.parameters.XXX).
globalParams common.Parameters
// volumes holds a DeepCopy of wf.Spec.Volumes to perform substitutions.
// It is then used in addVolumeReferences() when creating a pod.
volumes []apiv1.Volume
// ArtifactRepository contains the default location of an artifact repository for container artifacts
artifactRepository *config.ArtifactRepository
// map of pods which need to be labeled with completed=true
completedPods map[string]bool
// map of pods which is identified as succeeded=true
succeededPods map[string]bool
// deadline is the deadline by which this operation should relinquish
// its hold on the workflow so that an operation does not run for too long
// and starve other workqueue items. It also enables workflow progress to
// be periodically synced to the database.
deadline time.Time
// activePods tracks the number of active (Running/Pending) pods for controlling
// parallelism
activePods int64
// workflowDeadline is the deadline which the workflow is expected to complete before we
// terminate the workflow.
workflowDeadline *time.Time
eventRecorder record.EventRecorder
// preExecutionNodePhases contains the phases of all the nodes before the current operation. Necessary to infer
// changes in phase for metric emission
preExecutionNodePhases map[string]wfv1.NodePhase
// execWf holds the Workflow for use in execution.
// In Normal workflow scenario: It holds copy of workflow object
// In Submit From WorkflowTemplate: It holds merged workflow with WorkflowDefault, Workflow and WorkflowTemplate
// 'execWf.Spec' should usually be used instead of `wf.Spec`, with two exceptions for user editable fields:
// 1. `wf.Spec.Suspend`
// 2. `wf.Spec.Shutdown`
execWf *wfv1.Workflow
}
var (
// ErrDeadlineExceeded indicates the operation exceeded its deadline for execution
ErrDeadlineExceeded = errors.New(errors.CodeTimeout, "Deadline exceeded")
// ErrParallelismReached indicates this workflow reached its parallelism limit
ErrParallelismReached = errors.New(errors.CodeForbidden, "Max parallelism reached")
// ErrTimeout indicates a specific template timed out
ErrTimeout = errors.New(errors.CodeTimeout, "timeout")
)
// maxOperationTime is the maximum time a workflow operation is allowed to run
// for before requeuing the workflow onto the workqueue.
var (
maxOperationTime = envutil.LookupEnvDurationOr("MAX_OPERATION_TIME", 30*time.Second)
defaultRequeueTime = envutil.LookupEnvDurationOr("DEFAULT_REQUEUE_TIME", maxOperationTime/2)
)
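// For example (assumption, not taken from this file): running the controller with
// MAX_OPERATION_TIME=60s gives each operate() call a 60-second budget and, unless
// DEFAULT_REQUEUE_TIME is also set, a 30-second default requeue interval.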
// failedNodeStatus is a subset of NodeStatus that is only used to Marshal certain fields into a JSON of failed nodes
type failedNodeStatus struct {
DisplayName string `json:"displayName"`
Message string `json:"message"`
TemplateName string `json:"templateName"`
Phase string `json:"phase"`
PodName string `json:"podName"`
FinishedAt metav1.Time `json:"finishedAt"`
}
// newWorkflowOperationCtx creates and initializes a new wfOperationCtx object.
func newWorkflowOperationCtx(wf *wfv1.Workflow, wfc *WorkflowController) *wfOperationCtx {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
wfCopy := wf.DeepCopyObject().(*wfv1.Workflow)
woc := wfOperationCtx{
wf: wfCopy,
orig: wf,
execWf: wfCopy,
updated: false,
log: log.WithFields(log.Fields{
"workflow": wf.ObjectMeta.Name,
"namespace": wf.ObjectMeta.Namespace,
}),
controller: wfc,
globalParams: make(map[string]string),
volumes: wf.Spec.DeepCopy().Volumes,
completedPods: make(map[string]bool),
succeededPods: make(map[string]bool),
deadline: time.Now().UTC().Add(maxOperationTime),
eventRecorder: wfc.eventRecorderManager.Get(wf.Namespace),
preExecutionNodePhases: make(map[string]wfv1.NodePhase),
}
if woc.wf.Status.Nodes == nil {
woc.wf.Status.Nodes = make(map[string]wfv1.NodeStatus)
}
if woc.wf.Status.StoredTemplates == nil {
woc.wf.Status.StoredTemplates = make(map[string]wfv1.Template)
}
return &woc
}
// operate is the main operator logic of a workflow. It evaluates the current state of the workflow,
// and its pods and decides how to proceed down the execution path.
// TODO: an error returned by this method should result in requeuing the workflow to be retried at a
// later time
// As you must not call `persistUpdates` twice, you must not call `operate` twice.
func (woc *wfOperationCtx) operate() {
defer func() {
if woc.wf.Status.Fulfilled() {
_ = woc.killDaemonedChildren("")
}
woc.persistUpdates()
}()
defer func() {
if r := recover(); r != nil {
woc.log.WithFields(log.Fields{"stack": string(debug.Stack()), "r": r}).Errorf("Recovered from panic")
if rerr, ok := r.(error); ok {
woc.markWorkflowError(rerr)
} else {
woc.markWorkflowPhase(wfv1.NodeError, fmt.Sprintf("%v", r))
}
woc.controller.metrics.OperationPanic()
}
}()
woc.log.Infof("Processing workflow")
// Set the Execute workflow spec for execution
// execWf is a runtime execution spec merged from the Workflow, its WorkflowTemplate and the workflow defaults
err := woc.setExecWorkflow()
if err != nil {
woc.log.WithError(err).Errorf("Unable to get Workflow Template Reference for workflow")
woc.markWorkflowError(err)
return
}
if woc.wf.Status.ArtifactRepositoryRef == nil {
ref, err := woc.controller.artifactRepositories.Resolve(woc.execWf.Spec.ArtifactRepositoryRef, woc.wf.Namespace)
if err != nil {
woc.markWorkflowError(fmt.Errorf("failed to resolve artifact repository: %w", err))
return
}
woc.wf.Status.ArtifactRepositoryRef = ref
woc.updated = true
}
repo, err := woc.controller.artifactRepositories.Get(woc.wf.Status.ArtifactRepositoryRef)
if err != nil {
woc.markWorkflowError(fmt.Errorf("failed to get artifact repository: %v", err))
return
}
woc.artifactRepository = repo
// Workflow Level Synchronization lock
if woc.execWf.Spec.Synchronization != nil {
acquired, wfUpdate, msg, err := woc.controller.syncManager.TryAcquire(woc.wf, "", woc.execWf.Spec.Synchronization)
if err != nil {
woc.log.Warn("Failed to acquire the lock")
woc.markWorkflowFailed(fmt.Sprintf("Failed to acquire the synchronization lock. %s", err.Error()))
return
}
woc.updated = wfUpdate
if !acquired {
woc.log.Warn("Workflow processing has been postponed due to concurrency limit")
woc.wf.Status.Message = msg
return
}
}
// Update workflow duration variable
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", time.Since(woc.wf.Status.StartedAt.Time).Seconds())
// Populate the phase of all the nodes prior to execution
for _, node := range woc.wf.Status.Nodes {
woc.preExecutionNodePhases[node.ID] = node.Phase
}
woc.setGlobalParameters(woc.execWf.Spec.Arguments)
// Perform one-time workflow validation
if woc.wf.Status.Phase == "" {
woc.markWorkflowRunning()
err := woc.createPDBResource()
if err != nil {
msg := fmt.Sprintf("Unable to create PDB resource for workflow, %s error: %s", woc.wf.Name, err)
woc.markWorkflowFailed(msg)
return
}
validateOpts := validate.ValidateOpts{ContainerRuntimeExecutor: woc.controller.GetContainerRuntimeExecutor()}
wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().WorkflowTemplates(woc.wf.Namespace))
cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(woc.controller.wfclientset.ArgoprojV1alpha1().ClusterWorkflowTemplates())
// Validate the execution wfSpec
wfConditions, err := validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, woc.wf, validateOpts)
if err != nil {
msg := fmt.Sprintf("invalid spec: %s", err.Error())
woc.markWorkflowFailed(msg)
return
}
// If we received conditions during validation (such as SpecWarnings), add them to the Workflow object
if len(*wfConditions) > 0 {
woc.wf.Status.Conditions.JoinConditions(wfConditions)
woc.updated = true
}
woc.workflowDeadline = woc.getWorkflowDeadline()
// The workflow will not be requeued automatically while its steps are in a pending state,
// so requeue it here to fire again at its deadline.
if woc.workflowDeadline != nil {
woc.requeue(time.Until(*woc.workflowDeadline))
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return time.Since(woc.wf.Status.StartedAt.Time).Seconds()
}}
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, true)
}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
} else {
woc.workflowDeadline = woc.getWorkflowDeadline()
err := woc.podReconciliation()
if err == nil {
err = woc.failSuspendedAndPendingNodesAfterDeadlineOrShutdown()
}
if err != nil {
woc.log.WithError(err).WithField("workflow", woc.wf.ObjectMeta.Name).Error("workflow timeout")
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", "Workflow timed out")
// TODO: we need to re-add to the workqueue, but should happen in caller
return
}
}
if woc.wf.Spec.Suspend != nil && *woc.wf.Spec.Suspend {
woc.log.Infof("workflow suspended")
return
}
if woc.execWf.Spec.Parallelism != nil {
woc.activePods = woc.countActivePods()
}
// Create a starting template context.
tmplCtx, err := woc.createTemplateContext(wfv1.ResourceScopeLocal, "")
if err != nil {
woc.log.WithError(err).Error("Failed to create a template context")
woc.markWorkflowError(err)
return
}
err = woc.substituteParamsInVolumes(woc.globalParams)
if err != nil {
woc.log.WithError(err).Error("volumes global param substitution error")
woc.markWorkflowError(err)
return
}
err = woc.createPVCs()
if err != nil {
if errorsutil.IsTransientErr(err) {
// Error was most likely caused by a lack of resources.
// In this case, Workflow will be in pending state and requeue.
woc.markWorkflowPhase(wfv1.NodePending, fmt.Sprintf("Waiting for a PVC to be created. %v", err))
woc.requeue(defaultRequeueTime)
return
}
err = fmt.Errorf("pvc create error: %w", err)
woc.log.WithError(err).Error("pvc create error")
woc.markWorkflowError(err)
return
} else if woc.wf.Status.Phase == wfv1.NodePending {
// Workflow might be in pending state if previous PVC creation is forbidden
woc.markWorkflowRunning()
}
node, err := woc.executeTemplate(woc.wf.ObjectMeta.Name, &wfv1.WorkflowStep{Template: woc.execWf.Spec.Entrypoint}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{})
if err != nil {
// the error is handled in the callee, so just log it.
msg := "error in entry template execution"
woc.log.WithError(err).Error(msg)
msg = fmt.Sprintf("%s %s: %+v", woc.wf.Name, msg, err)
switch err {
case ErrDeadlineExceeded:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowTimedOut", msg)
}
return
}
if node == nil || !node.Fulfilled() {
// node can be nil if a workflow is created immediately in a parallelism == 0 state
return
}
workflowStatus := node.Phase
var onExitNode *wfv1.NodeStatus
if woc.execWf.Spec.OnExit != "" && woc.wf.Spec.Shutdown.ShouldExecute(true) {
if workflowStatus == wfv1.NodeSkipped {
// treat skipped the same as Succeeded for workflow.status
woc.globalParams[common.GlobalVarWorkflowStatus] = string(wfv1.NodeSucceeded)
} else {
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
}
var failures []failedNodeStatus
for _, node := range woc.wf.Status.Nodes {
if node.Phase == wfv1.NodeFailed || node.Phase == wfv1.NodeError {
failures = append(failures,
failedNodeStatus{
DisplayName: node.DisplayName,
Message: node.Message,
TemplateName: node.TemplateName,
Phase: string(node.Phase),
PodName: node.ID,
FinishedAt: node.FinishedAt,
})
}
}
failedNodeBytes, err := json.Marshal(failures)
if err != nil {
woc.log.Errorf("Error marshalling failed nodes list: %+v", err)
// No need to return here
}
// This strconv.Quote is necessary so that the escaped quotes are not removed during parameter substitution
woc.globalParams[common.GlobalVarWorkflowFailures] = strconv.Quote(string(failedNodeBytes))
woc.log.Infof("Running OnExit handler: %s", woc.execWf.Spec.OnExit)
onExitNodeName := common.GenerateOnExitNodeName(woc.wf.ObjectMeta.Name)
onExitNode, err = woc.executeTemplate(onExitNodeName, &wfv1.WorkflowStep{Template: woc.execWf.Spec.OnExit}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{onExitTemplate: true})
if err != nil {
// the error is handled in the callee, so just log it.
woc.log.WithError(err).Error("error in exit template execution")
return
}
if onExitNode == nil || !onExitNode.Fulfilled() {
return
}
}
var workflowMessage string
if node.FailedOrError() && woc.execWf.Spec.Shutdown != "" {
workflowMessage = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
workflowMessage = node.Message
}
// If we get here, the workflow completed, all PVCs were deleted successfully, and
// exit handlers were executed. We now need to infer the workflow phase from the
// node phase.
switch workflowStatus {
case wfv1.NodeSucceeded, wfv1.NodeSkipped:
if onExitNode != nil && onExitNode.FailedOrError() {
// if main workflow succeeded, but the exit node was unsuccessful
// the workflow is now considered unsuccessful.
woc.markWorkflowPhase(onExitNode.Phase, onExitNode.Message)
} else {
woc.markWorkflowSuccess()
}
case wfv1.NodeFailed:
woc.markWorkflowFailed(workflowMessage)
case wfv1.NodeError:
woc.markWorkflowPhase(wfv1.NodeError, workflowMessage)
default:
// NOTE: we should never make it here because if the node was 'Running' we should have
// returned earlier.
err = errors.InternalErrorf("Unexpected node phase %s: %+v", woc.wf.ObjectMeta.Name, err)
woc.markWorkflowError(err)
}
if woc.execWf.Spec.Metrics != nil {
realTimeScope := map[string]func() float64{common.GlobalVarWorkflowDuration: func() float64 {
return node.FinishedAt.Sub(node.StartedAt.Time).Seconds()
}}
woc.globalParams[common.GlobalVarWorkflowStatus] = string(workflowStatus)
woc.computeMetrics(woc.execWf.Spec.Metrics.Prometheus, woc.globalParams, realTimeScope, false)
}
err = woc.deletePVCs()
if err != nil {
woc.log.WithError(err).Warn("failed to delete PVCs")
}
}
func (woc *wfOperationCtx) getWorkflowDeadline() *time.Time {
if woc.execWf.Spec.ActiveDeadlineSeconds == nil {
return nil
}
if woc.wf.Status.StartedAt.IsZero() {
return nil
}
startedAt := woc.wf.Status.StartedAt.Truncate(time.Second)
deadline := startedAt.Add(time.Duration(*woc.execWf.Spec.ActiveDeadlineSeconds) * time.Second).UTC()
return &deadline
}
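// As an illustration (hypothetical workflow, not from this file): a workflow started at
// 10:00:00.400 UTC with activeDeadlineSeconds=300 gets a deadline of 10:05:00 UTC,
// because the start time is truncated to the second before the duration is added.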
// setGlobalParameters sets the globalParam map with global parameters
func (woc *wfOperationCtx) setGlobalParameters(executionParameters wfv1.Arguments) {
woc.globalParams[common.GlobalVarWorkflowName] = woc.wf.ObjectMeta.Name
woc.globalParams[common.GlobalVarWorkflowNamespace] = woc.wf.ObjectMeta.Namespace
woc.globalParams[common.GlobalVarWorkflowServiceAccountName] = woc.execWf.Spec.ServiceAccountName
woc.globalParams[common.GlobalVarWorkflowUID] = string(woc.wf.ObjectMeta.UID)
woc.globalParams[common.GlobalVarWorkflowCreationTimestamp] = woc.wf.ObjectMeta.CreationTimestamp.Format(time.RFC3339)
if woc.execWf.Spec.Priority != nil {
woc.globalParams[common.GlobalVarWorkflowPriority] = strconv.Itoa(int(*woc.execWf.Spec.Priority))
}
for char := range strftime.FormatChars {
cTimeVar := fmt.Sprintf("%s.%s", common.GlobalVarWorkflowCreationTimestamp, string(char))
woc.globalParams[cTimeVar] = strftime.Format("%"+string(char), woc.wf.ObjectMeta.CreationTimestamp.Time)
}
if workflowParameters, err := json.Marshal(woc.execWf.Spec.Arguments.Parameters); err == nil {
woc.globalParams[common.GlobalVarWorkflowParameters] = string(workflowParameters)
}
for _, param := range executionParameters.Parameters {
woc.globalParams["workflow.parameters."+param.Name] = param.Value.String()
}
for k, v := range woc.wf.ObjectMeta.Annotations {
woc.globalParams["workflow.annotations."+k] = v
}
for k, v := range woc.wf.ObjectMeta.Labels {
woc.globalParams["workflow.labels."+k] = v
}
if woc.wf.Status.Outputs != nil {
for _, param := range woc.wf.Status.Outputs.Parameters {
woc.globalParams["workflow.outputs.parameters."+param.Name] = param.Value.String()
}
}
}
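// As an illustration (hypothetical workflow, not from this file): a workflow named "hello"
// with a parameter "message" and a label "app=demo" ends up with globalParams entries such as
// "workflow.name"="hello", "workflow.parameters.message"=<its value> and
// "workflow.labels.app"="demo", all available for template substitution.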
// persistUpdates will update a workflow with any updates made during workflow operation.
// It also labels any pods as completed if we have extracted everything we need from it.
// NOTE: a previous implementation used Patch instead of Update, but Patch does not work with
// the fake CRD clientset which makes unit testing extremely difficult.
func (woc *wfOperationCtx) persistUpdates() {
if !woc.updated {
return
}
resource.UpdateResourceDurations(woc.wf)
progress.UpdateProgress(woc.wf)
// You MUST not call `persistUpdates` twice.
// * It fails because `reapplyUpdate` cannot work unless the resource versions are different.
// * It will double the number of Kubernetes API requests.
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot persist updates with mismatched resource versions")
}
wfClient := woc.controller.wfclientset.ArgoprojV1alpha1().Workflows(woc.wf.ObjectMeta.Namespace)
// try and compress nodes if needed
nodes := woc.wf.Status.Nodes
err := woc.controller.hydrator.Dehydrate(woc.wf)
if err != nil {
woc.log.Warnf("Failed to dehydrate: %v", err)
woc.markWorkflowError(err)
}
// Release all acquired lock for completed workflow
if woc.wf.Status.Synchronization != nil && woc.wf.Status.Fulfilled() {
if woc.controller.syncManager.ReleaseAll(woc.wf) {
log.WithFields(log.Fields{"key": woc.wf.Name}).Info("Released all acquired locks")
}
}
wf, err := wfClient.Update(woc.wf)
if err != nil {
woc.log.Warnf("Error updating workflow: %v %s", err, apierr.ReasonForError(err))
if argokubeerr.IsRequestEntityTooLargeErr(err) {
woc.persistWorkflowSizeLimitErr(wfClient, err)
return
}
if !apierr.IsConflict(err) {
return
}
woc.log.Info("Re-applying updates on latest version and retrying update")
wf, err := woc.reapplyUpdate(wfClient, nodes)
if err != nil {
woc.log.Infof("Failed to re-apply update: %+v", err)
return
}
woc.wf = wf
} else {
woc.wf = wf
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
}
if !woc.controller.hydrator.IsHydrated(woc.wf) {
panic("workflow should be hydrated")
}
woc.log.WithFields(log.Fields{"resourceVersion": woc.wf.ResourceVersion, "phase": woc.wf.Status.Phase}).Info("Workflow update successful")
if os.Getenv("INFORMER_WRITE_BACK") != "false" {
if err := woc.writeBackToInformer(); err != nil {
woc.markWorkflowError(err)
return
}
} else {
time.Sleep(enoughTimeForInformerSync)
}
// It is important that we *never* label pods as completed until we successfully updated the workflow
// Failing to do so means we can have inconsistent state.
// TODO: The completedPods will be labeled multiple times; this could be improved in the future.
// Send succeeded or completed pods to the gcPods channel to be deleted later, depending on the PodGCStrategy.
// Notice we do not need to label the pod if we will delete it later for GC. Otherwise, that may even result in
// errors if we label a pod that was deleted already.
if woc.execWf.Spec.PodGC != nil {
switch woc.execWf.Spec.PodGC.Strategy {
case wfv1.PodGCOnPodSuccess:
for podName := range woc.succeededPods {
woc.controller.gcPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
case wfv1.PodGCOnPodCompletion:
for podName := range woc.completedPods {
woc.controller.gcPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
}
} else {
// label pods which will not be deleted
for podName := range woc.completedPods {
woc.controller.completedPods <- fmt.Sprintf("%s/%s", woc.wf.ObjectMeta.Namespace, podName)
}
}
}
func (woc *wfOperationCtx) writeBackToInformer() error {
un, err := wfutil.ToUnstructured(woc.wf)
if err != nil {
return fmt.Errorf("failed to convert workflow to unstructured: %w", err)
}
err = woc.controller.wfInformer.GetStore().Update(un)
if err != nil {
return fmt.Errorf("failed to update informer store: %w", err)
}
return nil
}
// persistWorkflowSizeLimitErr will fail the workflow with an error when we hit the resource size limit
// See https://github.com/argoproj/argo/issues/913
func (woc *wfOperationCtx) persistWorkflowSizeLimitErr(wfClient v1alpha1.WorkflowInterface, err error) {
woc.wf = woc.orig.DeepCopy()
woc.markWorkflowError(err)
_, err = wfClient.Update(woc.wf)
if err != nil {
woc.log.Warnf("Error updating workflow with size error: %v", err)
}
}
// reapplyUpdate GETs the latest version of the workflow, re-applies the updates and
// retries the UPDATE multiple times. For reasoning behind this technique, see:
// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
func (woc *wfOperationCtx) reapplyUpdate(wfClient v1alpha1.WorkflowInterface, nodes wfv1.Nodes) (*wfv1.Workflow, error) {
// if this condition is true, then this func will always error
if woc.orig.ResourceVersion != woc.wf.ResourceVersion {
woc.log.Panic("cannot re-apply update with mismatched resource versions")
}
err := woc.controller.hydrator.Hydrate(woc.orig)
if err != nil {
return nil, err
}
// First generate the patch
oldData, err := json.Marshal(woc.orig)
if err != nil {
return nil, err
}
woc.controller.hydrator.HydrateWithNodes(woc.wf, nodes)
newData, err := json.Marshal(woc.wf)
if err != nil {
return nil, err
}
patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
if err != nil {
return nil, err
}
// Next get latest version of the workflow, apply the patch and retry the update
attempt := 1
for {
currWf, err := wfClient.Get(woc.wf.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
err = woc.controller.hydrator.Hydrate(currWf)
if err != nil {
return nil, err
}
currWfBytes, err := json.Marshal(currWf)
if err != nil {
return nil, err
}
newWfBytes, err := jsonpatch.MergePatch(currWfBytes, patchBytes)
if err != nil {
return nil, err
}
var newWf wfv1.Workflow
err = json.Unmarshal(newWfBytes, &newWf)
if err != nil {
return nil, err
}
err = woc.controller.hydrator.Dehydrate(&newWf)
if err != nil {
return nil, err
}
wf, err := wfClient.Update(&newWf)
if err == nil {
woc.log.Infof("Update retry attempt %d successful", attempt)
woc.controller.hydrator.HydrateWithNodes(wf, nodes)
return wf, nil
}
attempt++
woc.log.Warnf("Update retry attempt %d failed: %v", attempt, err)
if attempt > 5 {
return nil, err
}
}
}
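// As a sketch (hypothetical objects, not from this file): if the original workflow had
// status.phase=Running and the operated copy has status.phase=Succeeded, CreateMergePatch
// yields {"status":{"phase":"Succeeded"}}, which is then applied on top of whatever
// resource version currently exists in the cluster before retrying the update.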
// requeue this workflow onto the workqueue for later processing
func (woc *wfOperationCtx) requeue(afterDuration time.Duration) {
key, err := cache.MetaNamespaceKeyFunc(woc.wf)
if err != nil {
woc.log.Errorf("Failed to requeue workflow %s: %v", woc.wf.ObjectMeta.Name, err)
return
}
woc.controller.wfQueue.AddAfter(key, afterDuration)
}
// processNodeRetries updates the retry node state based on the child node state and the retry strategy and returns the node.
func (woc *wfOperationCtx) processNodeRetries(node *wfv1.NodeStatus, retryStrategy wfv1.RetryStrategy, opts *executeTemplateOpts) (*wfv1.NodeStatus, bool, error) {
if node.Fulfilled() {
return node, true, nil
}
lastChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
if lastChildNode == nil {
return node, true, nil
}
if !lastChildNode.Fulfilled() {
// last child node is still running.
return node, true, nil
}
if !lastChildNode.FailedOrError() {
node.Outputs = lastChildNode.Outputs.DeepCopy()
woc.wf.Status.Nodes[node.ID] = *node
return woc.markNodePhase(node.Name, wfv1.NodeSucceeded), true, nil
}
if woc.execWf.Spec.Shutdown != "" || (woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)) {
var message string
if woc.execWf.Spec.Shutdown != "" {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
message = fmt.Sprintf("retry exceeded workflow deadline %s", *woc.workflowDeadline)
}
woc.log.Infoln(message)
return woc.markNodePhase(node.Name, lastChildNode.Phase, message), true, nil
}
if retryStrategy.Backoff != nil {
maxDurationDeadline := time.Time{}
// Process max duration limit
if retryStrategy.Backoff.MaxDuration != "" && len(node.Children) > 0 {
maxDuration, err := parseStringToDuration(retryStrategy.Backoff.MaxDuration)
if err != nil {
return nil, false, err
}
firstChildNode := getChildNodeIndex(node, woc.wf.Status.Nodes, 0)
maxDurationDeadline = firstChildNode.StartedAt.Add(maxDuration)
if time.Now().After(maxDurationDeadline) {
woc.log.Infoln("Max duration limit exceeded. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Max duration limit exceeded"), true, nil
}
}
// Max duration limit hasn't been exceeded, process the backoff
if retryStrategy.Backoff.Duration == "" {
return nil, false, fmt.Errorf("no base duration specified for retryStrategy")
}
baseDuration, err := parseStringToDuration(retryStrategy.Backoff.Duration)
if err != nil {
return nil, false, err
}
timeToWait := baseDuration
retryStrategyBackoffFactor, err := intstr.Int32(retryStrategy.Backoff.Factor)
if err != nil {
return nil, false, err
}
if retryStrategyBackoffFactor != nil && *retryStrategyBackoffFactor > 0 {
// Formula: timeToWait = duration * factor^retry_number
// Note that timeToWait should equal the base duration for the first retry attempt.
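// e.g. (illustrative) duration=10s, factor=2 gives waits of 10s, 20s, 40s, ... on successive retries.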
timeToWait = baseDuration * time.Duration(math.Pow(float64(*retryStrategyBackoffFactor), float64(len(node.Children)-1)))
}
waitingDeadline := lastChildNode.FinishedAt.Add(timeToWait)
// If the waiting deadline is after the max duration deadline, then it's futile to wait until then. Stop early
if !maxDurationDeadline.IsZero() && waitingDeadline.After(maxDurationDeadline) {
woc.log.Infoln("Backoff would exceed max duration limit. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "Backoff would exceed max duration limit"), true, nil
}
// See if we have waited past the deadline
if time.Now().Before(waitingDeadline) {
woc.requeue(timeToWait)
retryMessage := fmt.Sprintf("Backoff for %s", humanize.Duration(timeToWait))
return woc.markNodePhase(node.Name, node.Phase, retryMessage), false, nil
}
woc.log.WithField("node", node.Name).Infof("node has maxDuration set, setting executionDeadline to: %s", humanize.Timestamp(maxDurationDeadline))
opts.executionDeadline = maxDurationDeadline
node = woc.markNodePhase(node.Name, node.Phase, "")
}
var retryOnFailed bool
var retryOnError bool
switch retryStrategy.RetryPolicy {
case wfv1.RetryPolicyAlways:
retryOnFailed = true
retryOnError = true
case wfv1.RetryPolicyOnError:
retryOnFailed = false
retryOnError = true
case wfv1.RetryPolicyOnFailure, "":
retryOnFailed = true
retryOnError = false
default:
return nil, false, fmt.Errorf("%s is not a valid RetryPolicy", retryStrategy.RetryPolicy)
}
if (lastChildNode.Phase == wfv1.NodeFailed && !retryOnFailed) || (lastChildNode.Phase == wfv1.NodeError && !retryOnError) {
woc.log.Infof("Node not set to be retried after status: %s", lastChildNode.Phase)
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
if !lastChildNode.CanRetry() {
woc.log.Infof("Node cannot be retried. Marking it failed")
return woc.markNodePhase(node.Name, lastChildNode.Phase, lastChildNode.Message), true, nil
}
limit, err := intstr.Int32(retryStrategy.Limit)
if err != nil {
return nil, false, err
}
if retryStrategy.Limit != nil && limit != nil && int32(len(node.Children)) > *limit {
woc.log.Infoln("No more retries left. Failing...")
return woc.markNodePhase(node.Name, lastChildNode.Phase, "No more retries left"), true, nil
}
woc.log.Infof("%d child nodes of %s failed. Trying again...", len(node.Children), node.Name)
return node, true, nil
}
// podReconciliation is the process by which a workflow will examine all its related
// pods and update the node state before continuing the evaluation of the workflow.
// Records all pods which were observed completed, which will be labeled completed=true
// after successful persist of the workflow.
func (woc *wfOperationCtx) podReconciliation() error {
podList, err := woc.getAllWorkflowPods()
if err != nil {
return err
}
seenPods := make(map[string]*apiv1.Pod)
seenPodLock := &sync.Mutex{}
wfNodesLock := &sync.RWMutex{}
performAssessment := func(pod *apiv1.Pod) {
if pod == nil {
return
}
nodeNameForPod := pod.Annotations[common.AnnotationKeyNodeName]
nodeID := woc.wf.NodeID(nodeNameForPod)
seenPodLock.Lock()
seenPods[nodeID] = pod
seenPodLock.Unlock()
wfNodesLock.Lock()
defer wfNodesLock.Unlock()
if node, ok := woc.wf.Status.Nodes[nodeID]; ok {
if newState := woc.assessNodeStatus(pod, &node); newState != nil {
woc.wf.Status.Nodes[nodeID] = *newState
woc.addOutputsToGlobalScope(node.Outputs)
if node.MemoizationStatus != nil {
c := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, node.MemoizationStatus.CacheName)
err := c.Save(node.MemoizationStatus.Key, node.ID, node.Outputs)
if err != nil {
woc.log.WithFields(log.Fields{"nodeID": node.ID}).WithError(err).Error("Failed to save node outputs to cache")
node.Phase = wfv1.NodeError
}
}
woc.updated = true
}
node := woc.wf.Status.Nodes[pod.ObjectMeta.Name]
if node.Fulfilled() && !node.IsDaemoned() {
if tmpVal, tmpOk := pod.Labels[common.LabelKeyCompleted]; tmpOk {
if tmpVal == "true" {
return
}
}
woc.completedPods[pod.ObjectMeta.Name] = true
if woc.shouldPrintPodSpec(node) {
printPodSpecLog(pod, woc.wf.Name)
}
if !woc.orig.Status.Nodes[node.ID].Fulfilled() {
woc.onNodeComplete(&node)
}
}
if node.Succeeded() {
woc.succeededPods[pod.ObjectMeta.Name] = true
}
}
}
parallelPodNum := make(chan string, 500)
var wg sync.WaitGroup
for _, pod := range podList {
parallelPodNum <- pod.Name
wg.Add(1)
go func(pod *apiv1.Pod) {
defer wg.Done()
performAssessment(pod)
err = woc.applyExecutionControl(pod, wfNodesLock)
if err != nil {
woc.log.Warnf("Failed to apply execution control to pod %s", pod.Name)
}
<-parallelPodNum
}(pod)
}
wg.Wait()
// Now check for deleted pods. Iterate our nodes. If any one of our nodes does not show up in
// the seen list it implies that the pod was deleted without the controller seeing the event.
// It is now impossible to infer pod status. All we can do at this point is mark the node with Error, or
// we can re-submit it.
for nodeID, node := range woc.wf.Status.Nodes {
if node.Type != wfv1.NodeTypePod || node.Fulfilled() || node.StartedAt.IsZero() {
// node is not a pod, it is already complete, or it can be re-run.
continue
}
if _, ok := seenPods[nodeID]; !ok {
// grace-period to allow informer sync
recentlyStarted := recentlyStarted(node)
woc.log.WithFields(log.Fields{"nodeName": node.Name, "nodePhase": node.Phase, "recentlyStarted": recentlyStarted}).Info("Workflow pod is missing")
metrics.PodMissingMetric.WithLabelValues(strconv.FormatBool(recentlyStarted), string(node.Phase)).Inc()
// If the node is pending and the pod does not exist, it could be the case that we want to try to submit it
// again instead of marking it as an error. Check if that's the case.
if node.Pending() || recentlyStarted {
continue
}
woc.markNodePhase(node.Name, wfv1.NodeError, "pod deleted")
} else {
// At this point we are certain that the pod associated with our node is running or has been run;
// it is safe to extract the k8s-node information given this knowledge.
if node.HostNodeName != seenPods[nodeID].Spec.NodeName {
node.HostNodeName = seenPods[nodeID].Spec.NodeName
woc.wf.Status.Nodes[nodeID] = node
woc.updated = true
}
}
}
return nil
}
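// recentlyStarted reports whether the node started within the informer-sync grace period
// (RECENTLY_STARTED_POD_DURATION, defaulting to 10 seconds).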
func recentlyStarted(node wfv1.NodeStatus) bool {
return time.Since(node.StartedAt.Time) <= envutil.LookupEnvDurationOr("RECENTLY_STARTED_POD_DURATION", 10*time.Second)
}
// shouldPrintPodSpec returns whether the pod spec should be logged for the given node
func (woc *wfOperationCtx) shouldPrintPodSpec(node wfv1.NodeStatus) bool {
return woc.controller.Config.PodSpecLogStrategy.AllPods ||
(woc.controller.Config.PodSpecLogStrategy.FailedPod && node.FailedOrError())
}
// failSuspendedAndPendingNodesAfterDeadlineOrShutdown fails any active suspend nodes when the workflow is shutting down
// or has exceeded its deadline, and fails any pending nodes once the deadline has passed
func (woc *wfOperationCtx) failSuspendedAndPendingNodesAfterDeadlineOrShutdown() error {
deadlineExceeded := woc.workflowDeadline != nil && time.Now().UTC().After(*woc.workflowDeadline)
if woc.execWf.Spec.Shutdown != "" || deadlineExceeded {
for _, node := range woc.wf.Status.Nodes {
if node.IsActiveSuspendNode() || (node.Phase == wfv1.NodePending && deadlineExceeded) {
var message string
if woc.execWf.Spec.Shutdown != "" {
message = fmt.Sprintf("Stopped with strategy '%s'", woc.execWf.Spec.Shutdown)
} else {
message = "Step exceeded its deadline"
}
woc.markNodePhase(node.Name, wfv1.NodeFailed, message)
}
}
}
return nil
}
// countActivePods counts the number of active (Pending/Running) pods.
// Optionally restricts it to a template invocation (boundaryID)
func (woc *wfOperationCtx) countActivePods(boundaryIDs ...string) int64 {
var boundaryID = ""
if len(boundaryIDs) > 0 {
boundaryID = boundaryIDs[0]
}
var activePods int64
// if we care about parallelism, count the active pods at the template level
for _, node := range woc.wf.Status.Nodes {
if node.Type != wfv1.NodeTypePod {
continue
}
if boundaryID != "" && node.BoundaryID != boundaryID {
continue
}
switch node.Phase {
case wfv1.NodePending, wfv1.NodeRunning:
if node.SynchronizationStatus != nil && node.SynchronizationStatus.Waiting != "" {
// Do not include pending nodes that are waiting for a lock
continue
}
activePods++
}
}
return activePods
}
// countActiveChildren counts the number of active (Pending/Running) child nodes within the given boundary (boundaryID)
func (woc *wfOperationCtx) countActiveChildren(boundaryIDs ...string) int64 {
var boundaryID = ""
if len(boundaryIDs) > 0 {
boundaryID = boundaryIDs[0]
}
var activeChildren int64
// if we care about parallelism, count the active children at the template level
for _, node := range woc.wf.Status.Nodes {
if boundaryID != "" && node.BoundaryID != boundaryID {
continue
}
switch node.Type {
case wfv1.NodeTypePod, wfv1.NodeTypeSteps, wfv1.NodeTypeDAG:
default:
continue
}
switch node.Phase {
case wfv1.NodePending, wfv1.NodeRunning:
activeChildren++
}
}
return activeChildren
}
// getAllWorkflowPods returns all pods related to the current workflow
func (woc *wfOperationCtx) getAllWorkflowPods() ([]*apiv1.Pod, error) {
objs, err := woc.controller.podInformer.GetIndexer().ByIndex(indexes.WorkflowIndex, indexes.WorkflowIndexValue(woc.wf.Namespace, woc.wf.Name))
if err != nil {
return nil, err
}
pods := make([]*apiv1.Pod, len(objs))
for i, obj := range objs {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, fmt.Errorf("expected \"*apiv1.Pod\", got \"%v\"", reflect.TypeOf(obj).String())
}
pods[i] = pod
}
return pods, nil
}
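// printPodSpecLog logs the full pod spec as JSON for debugging, scoped to the owning workflow.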
func printPodSpecLog(pod *apiv1.Pod, wfName string) {
podSpecByte, err := json.Marshal(pod)
if err != nil {
log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Warnf("Unable to marshal pod spec. %v", err)
}
log.WithField("workflow", wfName).WithField("nodename", pod.Name).WithField("namespace", pod.Namespace).Infof("Pod Spec: %s", string(podSpecByte))
}
// assessNodeStatus compares the current state of a pod with its corresponding node
// and returns the new node status if something changed
func (woc *wfOperationCtx) assessNodeStatus(pod *apiv1.Pod, node *wfv1.NodeStatus) *wfv1.NodeStatus {
var newPhase wfv1.NodePhase
var newDaemonStatus *bool
var message string
updated := false
switch pod.Status.Phase {
case apiv1.PodPending:
newPhase = wfv1.NodePending
newDaemonStatus = pointer.BoolPtr(false)
message = getPendingReason(pod)
case apiv1.PodSucceeded:
newPhase = wfv1.NodeSucceeded
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodFailed:
// ignore pod failure for daemoned steps
if node.IsDaemoned() {
newPhase = wfv1.NodeSucceeded
} else {
newPhase, message = inferFailedReason(pod)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Infof("Pod failed")
}
newDaemonStatus = pointer.BoolPtr(false)
case apiv1.PodRunning:
if pod.DeletionTimestamp != nil {
// pod is being terminated
newPhase = wfv1.NodeError
message = "pod deleted during operation"
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Error(message)
} else {
newPhase = wfv1.NodeRunning
tmplStr, ok := pod.Annotations[common.AnnotationKeyTemplate]
if !ok {
log.WithField("pod", pod.ObjectMeta.Name).Warn("missing template annotation")
return nil
}
var tmpl wfv1.Template
err := json.Unmarshal([]byte(tmplStr), &tmpl)
if err != nil {
log.WithError(err).WithField("pod", pod.ObjectMeta.Name).Warn("template annotation unreadable")
return nil
}
if tmpl.Daemon != nil && *tmpl.Daemon {
// pod is running and template is marked daemon. check if everything is ready
for _, ctrStatus := range pod.Status.ContainerStatuses {
if !ctrStatus.Ready {
return nil
}
}
// proceed to mark node status as running (and daemoned)
newPhase = wfv1.NodeRunning
newDaemonStatus = pointer.BoolPtr(true)
log.Infof("Processing ready daemon pod: %v", pod.ObjectMeta.SelfLink)
}
}
default:
newPhase = wfv1.NodeError
message = fmt.Sprintf("Unexpected pod phase for %s: %s", pod.ObjectMeta.Name, pod.Status.Phase)
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Error(message)
}
if newDaemonStatus != nil {
if !*newDaemonStatus {
// if the daemon status switched to false, we prefer to just unset daemoned status field
// (as opposed to setting it to false)
newDaemonStatus = nil
}
if (newDaemonStatus != nil && node.Daemoned == nil) || (newDaemonStatus == nil && node.Daemoned != nil) {
log.Infof("Setting node %v daemoned: %v -> %v", node.ID, node.Daemoned, newDaemonStatus)
node.Daemoned = newDaemonStatus
updated = true
if pod.Status.PodIP != "" && pod.Status.PodIP != node.PodIP {
// only update Pod IP for daemoned nodes to reduce number of updates
log.Infof("Updating daemon node %s IP %s -> %s", node.ID, node.PodIP, pod.Status.PodIP)
node.PodIP = pod.Status.PodIP
}
}
}
outputStr, ok := pod.Annotations[common.AnnotationKeyOutputs]
if ok && node.Outputs == nil {
updated = true
log.Infof("Setting node %v outputs", node.ID)
var outputs wfv1.Outputs
err := json.Unmarshal([]byte(outputStr), &outputs)
if err != nil {
woc.log.WithField("displayName", node.DisplayName).WithField("templateName", node.TemplateName).
WithField("pod", pod.Name).Errorf("Failed to unmarshal %s outputs from pod annotation: %v", pod.Name, err)
node.Phase = wfv1.NodeError
} else {
node.Outputs = &outputs
}
}
if node.Phase != newPhase {
log.Infof("Updating node %s status %s -> %s", node.ID, node.Phase, newPhase)
// if we are transitioning from Pending to a different state, clear out pending message
if node.Phase == wfv1.NodePending {
node.Message = ""
}
updated = true
node.Phase = newPhase
}
if message != "" && node.Message != message {
log.Infof("Updating node %s message: %s", node.ID, message)
updated = true
node.Message = message
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
updated = true
if !node.IsDaemoned() {
node.FinishedAt = getLatestFinishedAt(pod)
}
if node.FinishedAt.IsZero() {
// If we get here, the container is daemoned so the
// finishedAt might not have been set.
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
}
node.ResourcesDuration = resource.DurationForPod(pod)
}
if updated {
return node
}
return nil
}
// getLatestFinishedAt returns the latest finishedAt timestamp from all the
// containers of this pod.
func getLatestFinishedAt(pod *apiv1.Pod) metav1.Time {
var latest metav1.Time
for _, ctr := range pod.Status.InitContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
for _, ctr := range pod.Status.ContainerStatuses {
if ctr.State.Terminated != nil && ctr.State.Terminated.FinishedAt.After(latest.Time) {
latest = ctr.State.Terminated.FinishedAt
}
}
return latest
}
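// getPendingReason returns a human-readable reason why the pod is still Pending,
// preferring container waiting state over pod scheduling conditions.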
func getPendingReason(pod *apiv1.Pod) string {
for _, ctrStatus := range pod.Status.ContainerStatuses {
if ctrStatus.State.Waiting != nil {
if ctrStatus.State.Waiting.Message != "" {
return fmt.Sprintf("%s: %s", ctrStatus.State.Waiting.Reason, ctrStatus.State.Waiting.Message)
}
return ctrStatus.State.Waiting.Reason
}
}
// Example:
// - lastProbeTime: null
// lastTransitionTime: 2018-08-29T06:38:36Z
// message: '0/3 nodes are available: 2 Insufficient cpu, 3 MatchNodeSelector.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
for _, cond := range pod.Status.Conditions {
if cond.Reason == apiv1.PodReasonUnschedulable {
if cond.Message != "" {
return fmt.Sprintf("%s: %s", cond.Reason, cond.Message)
}
return cond.Reason
}
}
return ""
}
// inferFailedReason returns metadata about a Failed pod to be used in its NodeStatus
// Returns a tuple of the new phase and message
func inferFailedReason(pod *apiv1.Pod) (wfv1.NodePhase, string) {
if pod.Status.Message != "" {
// Pod has a nice error message. Use that.
return wfv1.NodeFailed, pod.Status.Message
}
annotatedMsg := pod.Annotations[common.AnnotationKeyNodeMessage]
// We only get one message to set for the overall node status.
// If multiple containers failed, in order of preference:
// init, main (annotated), main (exit code), wait, sidecars
for _, ctr := range pod.Status.InitContainerStatuses {
// A Virtual Kubelet environment will not set the terminated state on a waiting container
// https://github.com/argoproj/argo/issues/3879
// https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208
if ctr.State.Waiting != nil {
return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name)
}
if ctr.State.Terminated == nil {
// We should never get here
log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name)
continue
}
if ctr.State.Terminated.ExitCode == 0 {
continue
}
errMsg := "failed to load artifacts"
for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} {
if msg != "" {
errMsg += ": " + msg
break
}
}
// NOTE: we consider artifact load issues as Error instead of Failed
return wfv1.NodeError, errMsg
}
failMessages := make(map[string]string)
for _, ctr := range pod.Status.ContainerStatuses {
// A Virtual Kubelet environment will not set the terminated state on a waiting container
// https://github.com/argoproj/argo/issues/3879
// https://github.com/virtual-kubelet/virtual-kubelet/blob/7f2a02291530d2df14905702e6d51500dd57640a/node/sync.go#L195-L208
if ctr.State.Waiting != nil {
return wfv1.NodeError, fmt.Sprintf("Pod failed before %s container starts", ctr.Name)
}
if ctr.State.Terminated == nil {
// We should never get here
log.Warnf("Pod %s phase was Failed but %s did not have terminated state", pod.ObjectMeta.Name, ctr.Name)
continue
}
if ctr.State.Terminated.ExitCode == 0 {
continue
}
if ctr.Name == common.WaitContainerName {
errDetails := ""
for _, msg := range []string{annotatedMsg, ctr.State.Terminated.Message} {
if msg != "" {
errDetails = msg
break
}
}
if errDetails == "" {
// executor is expected to annotate a message to the pod upon any errors.
// If we failed to see the annotated message, it is likely the pod ran with
// insufficient privileges. Give a hint to that effect.
errDetails = fmt.Sprintf("verify serviceaccount %s:%s has necessary privileges", pod.ObjectMeta.Namespace, pod.Spec.ServiceAccountName)
}
errMsg := fmt.Sprintf("failed to save outputs: %s", errDetails)
failMessages[ctr.Name] = errMsg
continue
}
if ctr.State.Terminated.Message != "" {
errMsg := ctr.State.Terminated.Message
if ctr.Name != common.MainContainerName {
errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg)
}
failMessages[ctr.Name] = errMsg
continue
}
if ctr.State.Terminated.Reason == "OOMKilled" {
failMessages[ctr.Name] = ctr.State.Terminated.Reason
continue
}
errMsg := fmt.Sprintf("failed with exit code %d", ctr.State.Terminated.ExitCode)
if ctr.Name != common.MainContainerName {
if ctr.State.Terminated.ExitCode == 137 || ctr.State.Terminated.ExitCode == 143 {
// if the sidecar was SIGKILL'd (exit code 137) assume it was because argoexec
// forcibly killed the container, in which case we ignore the error.
// Java code 143 is a normal exit 128 + 15 https://github.com/elastic/elasticsearch/issues/31847
log.Infof("Ignoring %d exit code of sidecar '%s'", ctr.State.Terminated.ExitCode, ctr.Name)
continue
}
errMsg = fmt.Sprintf("sidecar '%s' %s", ctr.Name, errMsg)
}
failMessages[ctr.Name] = errMsg
}
if failMsg, ok := failMessages[common.MainContainerName]; ok {
_, ok = failMessages[common.WaitContainerName]
isResourceTemplate := !ok
if isResourceTemplate && annotatedMsg != "" {
// For resource templates, we prefer the annotated message
// over the vanilla exit code 1 error
return wfv1.NodeFailed, annotatedMsg
}
return wfv1.NodeFailed, failMsg
}
if failMsg, ok := failMessages[common.WaitContainerName]; ok {
return wfv1.NodeError, failMsg
}
// If we get here, both the main and wait container succeeded. Iterate the fail messages to
// identify the sidecar which failed and return the message.
for _, failMsg := range failMessages {
return wfv1.NodeFailed, failMsg
}
// If we get here, we have detected that the main/wait containers succeed but the sidecar(s)
// were SIGKILL'd. The executor may have had to forcefully terminate the sidecar (kill -9),
// resulting in a 137 exit code (which we had ignored earlier). If failMessages is empty, it
// indicates that this is the case and we return Success instead of Failure.
return wfv1.NodeSucceeded, ""
}
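// createPVCs creates any PVCs declared in the workflow's volumeClaimTemplates that do not
// exist yet and records them in the workflow status so templates can mount them.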
func (woc *wfOperationCtx) createPVCs() error {
if !(woc.wf.Status.Phase == wfv1.NodePending || woc.wf.Status.Phase == wfv1.NodeRunning) {
// Only attempt to create PVCs if workflow is in Pending or Running state
// (e.g. passed validation, or didn't already complete)
return nil
}
if len(woc.execWf.Spec.VolumeClaimTemplates) == len(woc.wf.Status.PersistentVolumeClaims) {
// If we have already created the PVCs, then there is nothing to do.
// This will also handle the case where workflow has no volumeClaimTemplates.
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
for i, pvcTmpl := range woc.execWf.Spec.VolumeClaimTemplates {
if pvcTmpl.ObjectMeta.Name == "" {
return errors.Errorf(errors.CodeBadRequest, "volumeClaimTemplates[%d].metadata.name is required", i)
}
pvcTmpl = *pvcTmpl.DeepCopy()
// PVC name will be <workflowname>-<volumeclaimtemplatename>
refName := pvcTmpl.ObjectMeta.Name
pvcName := fmt.Sprintf("%s-%s", woc.wf.ObjectMeta.Name, pvcTmpl.ObjectMeta.Name)
woc.log.Infof("Creating pvc %s", pvcName)
pvcTmpl.ObjectMeta.Name = pvcName
if pvcTmpl.ObjectMeta.Labels == nil {
pvcTmpl.ObjectMeta.Labels = make(map[string]string)
}
pvcTmpl.ObjectMeta.Labels[common.LabelKeyWorkflow] = woc.wf.ObjectMeta.Name
pvcTmpl.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
}
pvc, err := pvcClient.Create(&pvcTmpl)
if err != nil && apierr.IsAlreadyExists(err) {
woc.log.WithField("pvc", pvcTmpl.Name).Info("pvc already exists. Workflow is re-using it")
pvc, err = pvcClient.Get(pvcTmpl.Name, metav1.GetOptions{})
if err != nil {
return err
}
hasOwnerReference := false
for i := range pvc.OwnerReferences {
ownerRef := pvc.OwnerReferences[i]
if ownerRef.UID == woc.wf.UID {
hasOwnerReference = true
break
}
}
if !hasOwnerReference {
return errors.Errorf(errors.CodeForbidden, "%s pvc already exists with different ownerreference", pvcTmpl.Name)
}
}
if err != nil {
return err
}
vol := apiv1.Volume{
Name: refName,
VolumeSource: apiv1.VolumeSource{
PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.ObjectMeta.Name,
},
},
}
woc.wf.Status.PersistentVolumeClaims = append(woc.wf.Status.PersistentVolumeClaims, vol)
woc.updated = true
}
return nil
}
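// deletePVCs deletes the workflow's PVCs according to the configured volumeClaimGC strategy,
// keeping any PVCs that failed to delete in the status and returning the first error encountered.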
func (woc *wfOperationCtx) deletePVCs() error {
gcStrategy := woc.wf.Spec.GetVolumeClaimGC().GetStrategy()
switch gcStrategy {
case wfv1.VolumeClaimGCOnSuccess:
if woc.wf.Status.Phase == wfv1.NodeError || woc.wf.Status.Phase == wfv1.NodeFailed {
// Skip deleting PVCs to reuse them for retried failed/error workflows.
// PVCs are automatically deleted when the corresponding owner workflows get deleted.
return nil
}
case wfv1.VolumeClaimGCOnCompletion:
default:
return fmt.Errorf("unknown volume gc strategy: %s", gcStrategy)
}
totalPVCs := len(woc.wf.Status.PersistentVolumeClaims)
if totalPVCs == 0 {
// PVC list already empty. nothing to do
return nil
}
pvcClient := woc.controller.kubeclientset.CoreV1().PersistentVolumeClaims(woc.wf.ObjectMeta.Namespace)
newPVClist := make([]apiv1.Volume, 0)
// Attempt to delete all PVCs. Record first error encountered
var firstErr error
for _, pvc := range woc.wf.Status.PersistentVolumeClaims {
woc.log.Infof("Deleting PVC %s", pvc.PersistentVolumeClaim.ClaimName)
err := pvcClient.Delete(pvc.PersistentVolumeClaim.ClaimName, nil)
if err != nil {
if !apierr.IsNotFound(err) {
woc.log.Errorf("Failed to delete pvc %s: %v", pvc.PersistentVolumeClaim.ClaimName, err)
newPVClist = append(newPVClist, pvc)
if firstErr == nil {
firstErr = err
}
}
}
}
if len(newPVClist) != totalPVCs {
// we were successful in deleting one or more PVCs
woc.log.Infof("Deleted %d/%d PVCs", totalPVCs-len(newPVClist), totalPVCs)
woc.wf.Status.PersistentVolumeClaims = newPVClist
woc.updated = true
}
return firstErr
}
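// getChildNodeIndex returns the child of node at the given index; a negative index counts
// from the end (for example, -1 returns the last child).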
func getChildNodeIndex(node *wfv1.NodeStatus, nodes wfv1.Nodes, index int) *wfv1.NodeStatus {
if len(node.Children) <= 0 {
return nil
}
nodeIndex := index
if index < 0 {
nodeIndex = len(node.Children) + index // This actually subtracts, since index is negative
if nodeIndex < 0 {
panic(fmt.Sprintf("child index '%d' out of bounds", index))
}
}
lastChildNodeName := node.Children[nodeIndex]
lastChildNode, ok := nodes[lastChildNodeName]
if !ok {
panic("could not find child node")
}
return &lastChildNode
}
type executeTemplateOpts struct {
// boundaryID is an ID for node grouping
boundaryID string
// onExitTemplate signifies that executeTemplate was called as part of an onExit handler.
// Necessary for graceful shutdowns
onExitTemplate bool
// executionDeadline is a deadline to set on any pods executed. This is necessary for pods to inherit backoff.maxDuration
executionDeadline time.Time
}
// executeTemplate executes the template with the given arguments and returns the created NodeStatus
// for the created node (if created). Nodes may not be created if the parallelism limit or the deadline has been exceeded.
// nodeName is the name to be used as the name of the node, and boundaryID indicates which template
// boundary this node belongs to.
func (woc *wfOperationCtx) executeTemplate(nodeName string, orgTmpl wfv1.TemplateReferenceHolder, tmplCtx *templateresolution.Context, args wfv1.Arguments, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
woc.log.Debugf("Evaluating node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), opts.boundaryID)
node := woc.wf.GetNodeByName(nodeName)
// Set templateScope from which the template resolution starts.
templateScope := tmplCtx.GetTemplateScope()
newTmplCtx, resolvedTmpl, templateStored, err := tmplCtx.ResolveTemplate(orgTmpl)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
localParams := make(map[string]string)
// Inject the pod name. If the pod has a retry strategy, the pod name will be changed and will be injected when it
// is determined
if resolvedTmpl.IsPodType() && woc.retryStrategy(resolvedTmpl) == nil {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Inputs has been processed with arguments already, so pass empty arguments.
processedTmpl, err := common.ProcessArgs(resolvedTmpl, &args, woc.globalParams, localParams, false)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
// If memoization is on, check if node output exists in cache
if node == nil && processedTmpl.Memoize != nil {
memoizationCache := woc.controller.cacheFactory.GetCache(controllercache.ConfigMapCache, processedTmpl.Memoize.Cache.ConfigMap.Name)
if memoizationCache == nil {
err := fmt.Errorf("cache could not be found or created")
woc.log.WithFields(log.Fields{"cacheName": processedTmpl.Memoize.Cache.ConfigMap.Name}).WithError(err)
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
entry, err := memoizationCache.Load(processedTmpl.Memoize.Key)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
hit := entry.Hit()
var outputs *wfv1.Outputs
if processedTmpl.Memoize.MaxAge != "" {
maxAge, err := time.ParseDuration(processedTmpl.Memoize.MaxAge)
if err != nil {
err := fmt.Errorf("invalid maxAge: %s", err)
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
maxAgeOutputs, ok := entry.GetOutputsWithMaxAge(maxAge)
if !ok {
// The outputs are expired, so this cache entry is not hit
hit = false
}
outputs = maxAgeOutputs
} else {
outputs = entry.GetOutputs()
}
memoizationStatus := &wfv1.MemoizationStatus{
Hit: hit,
Key: processedTmpl.Memoize.Key,
CacheName: processedTmpl.Memoize.Cache.ConfigMap.Name,
}
if hit {
node = woc.initializeCacheHitNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, outputs, memoizationStatus)
} else {
node = woc.initializeCacheNode(nodeName, processedTmpl, templateScope, orgTmpl, opts.boundaryID, memoizationStatus)
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
if node != nil {
if node.Fulfilled() {
if processedTmpl.Synchronization != nil {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
woc.log.Debugf("Node %s already completed", nodeName)
if processedTmpl.Metrics != nil {
// Check if this node completed since the last operation (it was not fulfilled in
// preExecutionNodePhases but is fulfilled now). If so, emit its metrics here; nodes
// that complete later within this operation have their metrics emitted further below.
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; ok && !prevNodeStatus.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
return node, nil
}
woc.log.Debugf("Executing node %s of %s is %s", nodeName, node.Type, node.Phase)
// Memoized nodes don't have StartedAt.
if node.StartedAt.IsZero() {
node.StartedAt = metav1.Time{Time: time.Now().UTC()}
node.EstimatedDuration = woc.estimateNodeDuration(node.Name)
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
}
}
// Check if we took too long operating on this workflow and immediately return if we did
if time.Now().UTC().After(woc.deadline) {
woc.log.Warnf("Deadline exceeded")
woc.requeue(defaultRequeueTime)
return node, ErrDeadlineExceeded
}
// Check the template deadline for Pending nodes
// This check covers the resource-forbidden and synchronization scenarios,
// in which only the node is created, in the Pending state.
_, err = woc.checkTemplateTimeout(processedTmpl, node)
if err != nil {
woc.log.Warnf("Template %s exceeded its deadline", processedTmpl.Name)
return woc.markNodePhase(nodeName, wfv1.NodeFailed, err.Error()), err
}
// Check if we exceeded template or workflow parallelism and immediately return if we did
if err := woc.checkParallelism(processedTmpl, node, opts.boundaryID); err != nil {
return node, err
}
if processedTmpl.Synchronization != nil {
lockAcquired, wfUpdated, msg, err := woc.controller.syncManager.TryAcquire(woc.wf, woc.wf.NodeID(nodeName), processedTmpl.Synchronization)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
if !lockAcquired {
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(processedTmpl), templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodePending, msg)
}
lockName, err := argosync.GetLockName(processedTmpl.Synchronization, woc.wf.Namespace)
if err != nil {
// If an error were to be returned here, it would have been caught by TryAcquire. If it didn't, then it is
// unexpected behavior and is a bug.
panic("bug: GetLockName should not return an error after a call to TryAcquire")
}
return woc.markNodeWaitingForLock(node.Name, lockName.EncodeName()), nil
} else {
woc.log.Infof("Node %s acquired synchronization lock", nodeName)
if node != nil {
node.Message = ""
node = woc.markNodeWaitingForLock(node.Name, "")
}
}
woc.updated = wfUpdated
}
// If the user has specified retries, node becomes a special retry node.
// This node acts as a parent of all retries that will be done for
// the container. The status of this node should be "Success" if any
// of the retries succeed. Otherwise, it is "Failed".
retryNodeName := ""
if woc.retryStrategy(processedTmpl) != nil {
retryNodeName = nodeName
retryParentNode := node
if retryParentNode == nil {
woc.log.Debugf("Inject a retry node for node %s", retryNodeName)
retryParentNode = woc.initializeExecutableNode(retryNodeName, wfv1.NodeTypeRetry, templateScope, processedTmpl, orgTmpl, opts.boundaryID, wfv1.NodeRunning)
}
processedRetryParentNode, continueExecution, err := woc.processNodeRetries(retryParentNode, *woc.retryStrategy(processedTmpl), opts)
if err != nil {
return woc.markNodeError(retryNodeName, err), err
} else if !continueExecution {
// We are still waiting for a retry delay to finish
return retryParentNode, nil
}
retryParentNode = processedRetryParentNode
// The retry node might have completed by now.
if retryParentNode.Fulfilled() {
if processedTmpl.Metrics != nil {
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[retryParentNode.ID]; (!ok || !prevNodeStatus.Fulfilled()) && retryParentNode.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
return retryParentNode, nil
}
lastChildNode := getChildNodeIndex(retryParentNode, woc.wf.Status.Nodes, -1)
if lastChildNode != nil && !lastChildNode.Fulfilled() {
// Last child node is still running.
nodeName = lastChildNode.Name
node = lastChildNode
} else {
// Create a new child node and append it to the retry node.
nodeName = fmt.Sprintf("%s(%d)", retryNodeName, len(retryParentNode.Children))
woc.addChildNode(retryNodeName, nodeName)
node = nil
localParams := make(map[string]string)
// Change the `pod.name` variable to the new retry node name
if processedTmpl.IsPodType() {
localParams[common.LocalVarPodName] = woc.wf.NodeID(nodeName)
}
// Inject the retryAttempt number
localParams[common.LocalVarRetries] = strconv.Itoa(len(retryParentNode.Children))
processedTmpl, err = common.SubstituteParams(processedTmpl, map[string]string{}, localParams)
if err != nil {
return woc.initializeNodeOrMarkError(node, nodeName, templateScope, orgTmpl, opts.boundaryID, err), err
}
}
}
switch processedTmpl.GetType() {
case wfv1.TemplateTypeContainer:
node, err = woc.executeContainer(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSteps:
node, err = woc.executeSteps(nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeScript:
node, err = woc.executeScript(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeResource:
node, err = woc.executeResource(nodeName, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeDAG:
node, err = woc.executeDAG(nodeName, newTmplCtx, templateScope, processedTmpl, orgTmpl, opts)
case wfv1.TemplateTypeSuspend:
node, err = woc.executeSuspend(nodeName, templateScope, processedTmpl, orgTmpl, opts)
default:
err = errors.Errorf(errors.CodeBadRequest, "Template '%s' missing specification", processedTmpl.Name)
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, opts.boundaryID, wfv1.NodeError, err.Error()), err
}
if err != nil {
node = woc.markNodeError(nodeName, err)
if processedTmpl.Synchronization != nil {
woc.controller.syncManager.Release(woc.wf, node.ID, processedTmpl.Synchronization)
}
// If retry policy is not set, or if it is not set to Always or OnError, we won't attempt to retry an errored container
// and we return instead.
retryStrategy := woc.retryStrategy(processedTmpl)
if retryStrategy == nil ||
(retryStrategy.RetryPolicy != wfv1.RetryPolicyAlways &&
retryStrategy.RetryPolicy != wfv1.RetryPolicyOnError) {
return node, err
}
}
if processedTmpl.Metrics != nil {
// Check if the node was just created, if it was emit realtime metrics.
// If the node did not previously exist, we can infer that it was created during the current operation, emit real time metrics.
if _, ok := woc.preExecutionNodePhases[node.ID]; !ok {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, true)
}
// Check if the node completed during this execution, if it did emit metrics
//
// This check is necessary because sometimes a node will be marked completed during the current execution and will
// not be considered again. The best example of this is the entrypoint steps/dag template (once completed, the
// workflow ends and it's not reconsidered). This check makes sure that its metrics also get emitted.
//
// In this check, a completed node may or may not have existed prior to this execution. If it did exist, ensure that it wasn't
// completed before this execution. If it did not exist prior, then we can infer that it was completed during this execution.
// The statement "(!ok || !prevNodeStatus.Fulfilled())" checks for this behavior and represents the material conditional
// "ok -> !prevNodeStatus.Fulfilled()" (https://en.wikipedia.org/wiki/Material_conditional)
if prevNodeStatus, ok := woc.preExecutionNodePhases[node.ID]; (!ok || !prevNodeStatus.Fulfilled()) && node.Fulfilled() {
localScope, realTimeScope := woc.prepareMetricScope(node)
woc.computeMetrics(processedTmpl.Metrics.Prometheus, localScope, realTimeScope, false)
}
}
node = woc.wf.GetNodeByName(node.Name)
// Swap the node back to retry node
if retryNodeName != "" {
retryNode := woc.wf.GetNodeByName(retryNodeName)
if !retryNode.Fulfilled() && node.Fulfilled() { // if the retry child has completed, we need to update ourselves
node, err = woc.executeTemplate(retryNodeName, orgTmpl, tmplCtx, args, opts)
if err != nil {
return woc.markNodeError(node.Name, err), err
}
}
node = retryNode
}
return node, nil
}
// Checks if the template has exceeded its deadline
func (woc *wfOperationCtx) checkTemplateTimeout(tmpl *wfv1.Template, node *wfv1.NodeStatus) (*time.Time, error) {
if node == nil {
return nil, nil
}
if tmpl.Timeout != "" {
tmplTimeout, err := time.ParseDuration(tmpl.Timeout)
if err != nil {
return nil, fmt.Errorf("invalid timeout format. %v", err)
}
deadline := node.StartedAt.Add(tmplTimeout)
if node.Phase == wfv1.NodePending && time.Now().After(deadline) {
return nil, ErrTimeout
}
return &deadline, nil
}
return nil, nil
}
// markWorkflowPhase is a convenience method to set the phase of the workflow with optional message
// optionally marks the workflow completed, which sets the finishedAt timestamp and completed label
func (woc *wfOperationCtx) markWorkflowPhase(phase wfv1.NodePhase, message string) {
markCompleted := false
if woc.wf.Status.Phase != phase {
if woc.wf.Status.Phase.Fulfilled() {
woc.log.WithFields(log.Fields{"fromPhase": woc.wf.Status.Phase, "toPhase": phase}).
Panic("workflow is already fulfilled")
}
woc.log.Infof("Updated phase %s -> %s", woc.wf.Status.Phase, phase)
woc.updated = true
woc.wf.Status.Phase = phase
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(phase)
switch phase {
case wfv1.NodeRunning:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowRunning", "Workflow Running")
case wfv1.NodeSucceeded:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeNormal, "WorkflowSucceeded", "Workflow completed")
case wfv1.NodeFailed, wfv1.NodeError:
woc.eventRecorder.Event(woc.wf, apiv1.EventTypeWarning, "WorkflowFailed", message)
}
markCompleted = phase.Completed()
}
if woc.wf.Status.StartedAt.IsZero() {
woc.updated = true
woc.wf.Status.StartedAt = metav1.Time{Time: time.Now().UTC()}
woc.wf.Status.EstimatedDuration = woc.estimateWorkflowDuration()
}
if woc.wf.Status.Message != message {
woc.log.Infof("Updated message %s -> %s", woc.wf.Status.Message, message)
woc.updated = true
woc.wf.Status.Message = message
}
if phase == wfv1.NodeError {
entryNode, ok := woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name]
if ok && entryNode.Phase == wfv1.NodeRunning {
entryNode.Phase = wfv1.NodeError
entryNode.Message = "Workflow operation error"
woc.wf.Status.Nodes[woc.wf.ObjectMeta.Name] = entryNode
woc.updated = true
}
}
switch phase {
case wfv1.NodeSucceeded, wfv1.NodeFailed, wfv1.NodeError:
// wait for all daemon nodes to get terminated before marking workflow completed
if markCompleted && !woc.hasDaemonNodes() {
woc.log.Infof("Marking workflow completed")
woc.wf.Status.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.globalParams[common.GlobalVarWorkflowDuration] = fmt.Sprintf("%f", woc.wf.Status.FinishedAt.Sub(woc.wf.Status.StartedAt.Time).Seconds())
if woc.wf.ObjectMeta.Labels == nil {
woc.wf.ObjectMeta.Labels = make(map[string]string)
}
woc.wf.ObjectMeta.Labels[common.LabelKeyCompleted] = "true"
woc.wf.Status.Conditions.UpsertCondition(wfv1.Condition{Status: metav1.ConditionTrue, Type: wfv1.ConditionTypeCompleted})
err := woc.deletePDBResource()
if err != nil {
woc.wf.Status.Phase = wfv1.NodeError
woc.wf.ObjectMeta.Labels[common.LabelKeyPhase] = string(wfv1.NodeError)
woc.updated = true
woc.wf.Status.Message = err.Error()
}
if woc.controller.wfArchive.IsEnabled() {
if woc.controller.isArchivable(woc.wf) {
woc.log.Infof("Marking workflow as pending archiving")
woc.wf.Labels[common.LabelKeyWorkflowArchivingStatus] = "Pending"
} else {
woc.log.Infof("Doesn't match with archive label selector. Skipping Archive")
}
}
woc.updated = true
}
}
}
// getEstimator returns a duration estimator; this may be a null implementation in the rare case of an error
func (woc *wfOperationCtx) getEstimator() estimation.Estimator {
if woc.estimator == nil {
woc.estimator, _ = woc.controller.estimatorFactory.NewEstimator(woc.wf)
}
return woc.estimator
}
func (woc *wfOperationCtx) estimateWorkflowDuration() wfv1.EstimatedDuration {
return woc.getEstimator().EstimateWorkflowDuration()
}
func (woc *wfOperationCtx) estimateNodeDuration(nodeName string) wfv1.EstimatedDuration {
return woc.getEstimator().EstimateNodeDuration(nodeName)
}
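// hasDaemonNodes reports whether any node in the workflow is currently daemoned.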
func (woc *wfOperationCtx) hasDaemonNodes() bool {
for _, node := range woc.wf.Status.Nodes {
if node.IsDaemoned() {
return true
}
}
return false
}
func (woc *wfOperationCtx) markWorkflowRunning() {
woc.markWorkflowPhase(wfv1.NodeRunning, "")
}
func (woc *wfOperationCtx) markWorkflowSuccess() {
woc.markWorkflowPhase(wfv1.NodeSucceeded, "")
}
func (woc *wfOperationCtx) markWorkflowFailed(message string) {
woc.markWorkflowPhase(wfv1.NodeFailed, message)
}
func (woc *wfOperationCtx) markWorkflowError(err error) {
woc.markWorkflowPhase(wfv1.NodeError, err.Error())
}
// stepsOrDagSeparator identifies if a node name starts with our naming convention separator from
// DAG or steps templates. Will match strings with a prefix like: [0]. or .
var stepsOrDagSeparator = regexp.MustCompile(`^(\[\d+\])?\.`)
// initializeExecutableNode initializes a node and stores the template.
func (woc *wfOperationCtx) initializeExecutableNode(nodeName string, nodeType wfv1.NodeType, templateScope string, executeTmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
node := woc.initializeNode(nodeName, nodeType, templateScope, orgTmpl, boundaryID, phase)
// Set the input values to the node.
if executeTmpl.Inputs.HasInputs() {
node.Inputs = executeTmpl.Inputs.DeepCopy()
}
if nodeType == wfv1.NodeTypeSuspend {
node = addRawOutputFields(node, executeTmpl)
}
if len(messages) > 0 {
node.Message = messages[0]
}
// Update the node
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// initializeNodeOrMarkError initializes an error node, or marks an existing node as errored.
func (woc *wfOperationCtx) initializeNodeOrMarkError(node *wfv1.NodeStatus, nodeName string, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, err error) *wfv1.NodeStatus {
if node != nil {
return woc.markNodeError(nodeName, err)
}
return woc.initializeNode(nodeName, wfv1.NodeTypeSkipped, templateScope, orgTmpl, boundaryID, wfv1.NodeError, err.Error())
}
// Creates a node status that is or will be cached
func (woc *wfOperationCtx) initializeCacheNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
if resolvedTmpl.Memoize == nil {
err := fmt.Errorf("cannot initialize a cached node from a non-memoized template")
woc.log.WithFields(log.Fields{"namespace": woc.wf.Namespace, "wfName": woc.wf.Name}).WithError(err)
panic(err)
}
woc.log.Debug("Initializing cached node ", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
node := woc.initializeExecutableNode(nodeName, wfutil.GetNodeType(resolvedTmpl), templateScope, resolvedTmpl, orgTmpl, boundaryID, wfv1.NodePending, messages...)
node.MemoizationStatus = memStat
return node
}
// Creates a node status that has been cached, completely initialized, and marked as finished
func (woc *wfOperationCtx) initializeCacheHitNode(nodeName string, resolvedTmpl *wfv1.Template, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, outputs *wfv1.Outputs, memStat *wfv1.MemoizationStatus, messages ...string) *wfv1.NodeStatus {
node := woc.initializeCacheNode(nodeName, resolvedTmpl, templateScope, orgTmpl, boundaryID, memStat, messages...)
node.Phase = wfv1.NodeSucceeded
node.Outputs = outputs
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
return node
}
func (woc *wfOperationCtx) initializeNode(nodeName string, nodeType wfv1.NodeType, templateScope string, orgTmpl wfv1.TemplateReferenceHolder, boundaryID string, phase wfv1.NodePhase, messages ...string) *wfv1.NodeStatus {
woc.log.Debugf("Initializing node %s: template: %s, boundaryID: %s", nodeName, common.GetTemplateHolderString(orgTmpl), boundaryID)
nodeID := woc.wf.NodeID(nodeName)
_, ok := woc.wf.Status.Nodes[nodeID]
if ok {
panic(fmt.Sprintf("node %s already initialized", nodeName))
}
node := wfv1.NodeStatus{
ID: nodeID,
Name: nodeName,
TemplateName: orgTmpl.GetTemplateName(),
TemplateRef: orgTmpl.GetTemplateRef(),
TemplateScope: templateScope,
Type: nodeType,
BoundaryID: boundaryID,
Phase: phase,
StartedAt: metav1.Time{Time: time.Now().UTC()},
EstimatedDuration: woc.estimateNodeDuration(nodeName),
}
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
node.DisplayName = strings.TrimPrefix(node.Name, boundaryNode.Name)
if stepsOrDagSeparator.MatchString(node.DisplayName) {
node.DisplayName = stepsOrDagSeparator.ReplaceAllString(node.DisplayName, "")
}
} else {
node.DisplayName = nodeName
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = node.StartedAt
}
var message string
if len(messages) > 0 {
message = fmt.Sprintf(" (message: %s)", messages[0])
node.Message = messages[0]
}
woc.wf.Status.Nodes[nodeID] = node
woc.log.Infof("%s node %v initialized %s%s", node.Type, node.ID, node.Phase, message)
woc.updated = true
return &node
}
// markNodePhase marks a node with the given phase, creating the node if necessary and handles timestamps
func (woc *wfOperationCtx) markNodePhase(nodeName string, phase wfv1.NodePhase, message ...string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
panic(fmt.Sprintf("workflow '%s' node '%s' uninitialized when marking as %v: %s", woc.wf.Name, nodeName, phase, message))
}
if node.Phase != phase {
if node.Phase.Fulfilled() {
woc.log.WithFields(log.Fields{"nodeName": node.Name, "fromPhase": node.Phase, "toPhase": phase}).
Error("node is already fulfilled")
}
woc.log.Infof("node %s phase %s -> %s", node.ID, node.Phase, phase)
node.Phase = phase
woc.updated = true
}
if len(message) > 0 {
if message[0] != node.Message {
woc.log.Infof("node %s message: %s", node.ID, message[0])
node.Message = message[0]
woc.updated = true
}
}
if node.Fulfilled() && node.FinishedAt.IsZero() {
node.FinishedAt = metav1.Time{Time: time.Now().UTC()}
woc.log.Infof("node %s finished: %s", node.ID, node.FinishedAt)
woc.updated = true
}
if !woc.orig.Status.Nodes[node.ID].Fulfilled() && node.Fulfilled() {
woc.onNodeComplete(node)
}
woc.wf.Status.Nodes[node.ID] = *node
return node
}
func (woc *wfOperationCtx) onNodeComplete(node *wfv1.NodeStatus) {
if !woc.controller.Config.NodeEvents.IsEnabled() {
return
}
message := fmt.Sprintf("%v node %s", node.Phase, node.Name)
if node.Message != "" {
message = message + ": " + node.Message
}
eventType := apiv1.EventTypeWarning
if node.Phase == wfv1.NodeSucceeded {
eventType = apiv1.EventTypeNormal
}
woc.eventRecorder.AnnotatedEventf(
woc.wf,
map[string]string{
common.AnnotationKeyNodeType: string(node.Type),
common.AnnotationKeyNodeName: node.Name,
},
eventType,
fmt.Sprintf("WorkflowNode%s", node.Phase),
message,
)
}
// markNodeError is a convenience method to mark a node with an error and set the message from the error
func (woc *wfOperationCtx) markNodeError(nodeName string, err error) *wfv1.NodeStatus {
woc.log.WithError(err).WithField("nodeName", nodeName).Error("Mark error node")
return woc.markNodePhase(nodeName, wfv1.NodeError, err.Error())
}
// markNodePending is a convenience method to mark a node as Pending and set the message from the error
func (woc *wfOperationCtx) markNodePending(nodeName string, err error) *wfv1.NodeStatus {
woc.log.Infof("Mark node %s as Pending, due to: %+v", nodeName, err)
node := woc.wf.GetNodeByName(nodeName)
return woc.markNodePhase(nodeName, wfv1.NodePending, fmt.Sprintf("Pending %s", time.Since(node.StartedAt.Time)))
}
// markNodeWaitingForLock is a convenience method to mark that a node is waiting for a lock
func (woc *wfOperationCtx) markNodeWaitingForLock(nodeName string, lockName string) *wfv1.NodeStatus {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
return node
}
if node.SynchronizationStatus == nil {
node.SynchronizationStatus = &wfv1.NodeSynchronizationStatus{}
}
if lockName == "" {
// If we are no longer waiting for a lock, nil out the sync status
node.SynchronizationStatus = nil
} else {
node.SynchronizationStatus.Waiting = lockName
}
woc.wf.Status.Nodes[node.ID] = *node
woc.updated = true
return node
}
// checkParallelism checks if the given template is able to be executed, considering the current active pods and workflow/template parallelism
func (woc *wfOperationCtx) checkParallelism(tmpl *wfv1.Template, node *wfv1.NodeStatus, boundaryID string) error {
if woc.execWf.Spec.Parallelism != nil && woc.activePods >= *woc.execWf.Spec.Parallelism {
woc.log.Infof("workflow active pod spec parallelism reached %d/%d", woc.activePods, *woc.execWf.Spec.Parallelism)
return ErrParallelismReached
}
// TODO: repeated calls to countActivePods is not optimal
switch tmpl.GetType() {
case wfv1.TemplateTypeDAG, wfv1.TemplateTypeSteps:
// if we are about to execute a DAG/Steps template, make sure we haven't already reached our limit
if tmpl.Parallelism != nil && node != nil {
templateActivePods := woc.countActivePods(node.ID)
if templateActivePods >= *tmpl.Parallelism {
woc.log.Infof("template (node %s) active pod parallelism reached %d/%d", node.ID, templateActivePods, *tmpl.Parallelism)
return ErrParallelismReached
}
}
fallthrough
default:
// if we are about to execute a pod, make sure our parent hasn't reached its limit
if boundaryID != "" && (node == nil || (node.Phase != wfv1.NodePending && node.Phase != wfv1.NodeRunning)) {
boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]
if !ok {
return errors.InternalError("boundaryNode not found")
}
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return err
}
_, boundaryTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
if boundaryTemplate != nil && boundaryTemplate.Parallelism != nil {
activeSiblings := woc.countActiveChildren(boundaryID)
woc.log.Debugf("counted %d/%d active children in boundary %s", activeSiblings, *boundaryTemplate.Parallelism, boundaryID)
if activeSiblings >= *boundaryTemplate.Parallelism {
woc.log.Infof("template (node %s) active children parallelism reached %d/%d", boundaryID, activeSiblings, *boundaryTemplate.Parallelism)
return ErrParallelismReached
}
}
}
}
return nil
}
func (woc *wfOperationCtx) executeContainer(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
// Check if the output of this container is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
woc.log.Debugf("Executing node %s with container template: %v\n", nodeName, tmpl)
_, err = woc.createWorkflowPod(nodeName, *tmpl.Container, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
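// getOutboundNodes returns the "outbound" (leaf) node IDs reachable from the given node.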
func (woc *wfOperationCtx) getOutboundNodes(nodeID string) []string {
node := woc.wf.Status.Nodes[nodeID]
switch node.Type {
case wfv1.NodeTypePod, wfv1.NodeTypeSkipped, wfv1.NodeTypeSuspend:
return []string{node.ID}
case wfv1.NodeTypeTaskGroup:
if len(node.Children) == 0 {
return []string{node.ID}
}
outboundNodes := make([]string, 0)
for _, child := range node.Children {
outboundNodes = append(outboundNodes, woc.getOutboundNodes(child)...)
}
return outboundNodes
case wfv1.NodeTypeRetry:
numChildren := len(node.Children)
if numChildren > 0 {
return []string{node.Children[numChildren-1]}
}
}
outbound := make([]string, 0)
for _, outboundNodeID := range node.OutboundNodes {
outNode := woc.wf.Status.Nodes[outboundNodeID]
if outNode.Type == wfv1.NodeTypePod {
outbound = append(outbound, outboundNodeID)
} else {
subOutIDs := woc.getOutboundNodes(outboundNodeID)
outbound = append(outbound, subOutIDs...)
}
}
return outbound
}
// getTemplateOutputsFromScope resolves a template's outputs from the scope of the template
func getTemplateOutputsFromScope(tmpl *wfv1.Template, scope *wfScope) (*wfv1.Outputs, error) {
if !tmpl.Outputs.HasOutputs() {
return nil, nil
}
var outputs wfv1.Outputs
if len(tmpl.Outputs.Parameters) > 0 {
outputs.Parameters = make([]wfv1.Parameter, 0)
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom == nil {
return nil, fmt.Errorf("output parameters must have a valueFrom specified")
}
val, err := scope.resolveParameter(param.ValueFrom.Parameter)
if err != nil {
// We have a default value to use instead of returning an error
if param.ValueFrom.Default != nil {
val = param.ValueFrom.Default.String()
} else {
return nil, err
}
}
param.Value = wfv1.AnyStringPtr(val)
param.ValueFrom = nil
outputs.Parameters = append(outputs.Parameters, param)
}
}
if len(tmpl.Outputs.Artifacts) > 0 {
outputs.Artifacts = make([]wfv1.Artifact, 0)
for _, art := range tmpl.Outputs.Artifacts {
resolvedArt, err := scope.resolveArtifact(art.From, art.SubPath)
if err != nil {
// If the artifact was not found and is optional, don't mark an error
if strings.Contains(err.Error(), "Unable to resolve") && art.Optional {
log.Warnf("Optional artifact '%s' was not found; it won't be available as an output", art.Name)
continue
}
return nil, fmt.Errorf("unable to resolve outputs from scope: %s", err)
}
resolvedArt.Name = art.Name
outputs.Artifacts = append(outputs.Artifacts, *resolvedArt)
}
}
return &outputs, nil
}
// hasOutputResultRef checks whether the named child's output result is referenced anywhere in the parent template
func hasOutputResultRef(name string, parentTmpl *wfv1.Template) bool {
var variableRefName string
if parentTmpl.DAG != nil {
variableRefName = "{{tasks." + name + ".outputs.result}}"
} else if parentTmpl.Steps != nil {
variableRefName = "{{steps." + name + ".outputs.result}}"
}
jsonValue, err := json.Marshal(parentTmpl)
if err != nil {
log.Warnf("Unable to marshal the template. %v, %v", parentTmpl, err)
}
return strings.Contains(string(jsonValue), variableRefName)
}
// getStepOrDAGTaskName extracts the step or DAG task name from a node name
func getStepOrDAGTaskName(nodeName string) string {
if strings.Contains(nodeName, ".") {
name := nodeName[strings.LastIndex(nodeName, ".")+1:]
// Retry, withItems and withParam scenario
if indx := strings.Index(name, "("); indx > 0 {
return name[0:indx]
}
return name
}
return nodeName
}
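// extractMainCtrFromScriptTemplate returns the main container of a script template,
// prepending the script source path to the container args when a source is provided.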
func extractMainCtrFromScriptTemplate(tmpl *wfv1.Template) apiv1.Container {
mainCtr := tmpl.Script.Container
// If script source is provided then pass all container args to the
// script instead of passing them to the container command directly
if tmpl.Script.Source != "" {
mainCtr.Args = append([]string{common.ExecutorScriptSourcePath}, mainCtr.Args...)
}
return mainCtr
}
func (woc *wfOperationCtx) executeScript(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
// Check if the output of this script is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
includeScriptOutput, err := woc.includeScriptOutput(nodeName, opts.boundaryID)
if err != nil {
return node, err
}
mainCtr := extractMainCtrFromScriptTemplate(tmpl)
_, err = woc.createWorkflowPod(nodeName, mainCtr, tmpl, &createWorkflowPodOpts{
includeScriptOutput: includeScriptOutput,
onExitPod: opts.onExitTemplate,
executionDeadline: opts.executionDeadline,
})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) requeueIfTransientErr(err error, nodeName string) (*wfv1.NodeStatus, error) {
if errorsutil.IsTransientErr(err) {
// Our error was most likely caused by a lack of resources.
woc.requeue(defaultRequeueTime)
return woc.markNodePending(nodeName, err), nil
}
return nil, err
}
// buildLocalScope adds all of a node's outputs to the local scope with the given prefix, as well
// as the global scope, if specified with a globalName
func (woc *wfOperationCtx) buildLocalScope(scope *wfScope, prefix string, node *wfv1.NodeStatus) {
// It may be that the node is a retry node, in which case we want to get the outputs of the last node
// in the retry group instead of the retry node itself.
if node.Type == wfv1.NodeTypeRetry {
node = getChildNodeIndex(node, woc.wf.Status.Nodes, -1)
}
if node.ID != "" {
key := fmt.Sprintf("%s.id", prefix)
scope.addParamToScope(key, node.ID)
}
if !node.StartedAt.Time.IsZero() {
key := fmt.Sprintf("%s.startedAt", prefix)
scope.addParamToScope(key, node.StartedAt.Time.Format(time.RFC3339))
}
if !node.FinishedAt.Time.IsZero() {
key := fmt.Sprintf("%s.finishedAt", prefix)
scope.addParamToScope(key, node.FinishedAt.Time.Format(time.RFC3339))
}
if node.PodIP != "" {
key := fmt.Sprintf("%s.ip", prefix)
scope.addParamToScope(key, node.PodIP)
}
if node.Phase != "" {
key := fmt.Sprintf("%s.status", prefix)
scope.addParamToScope(key, string(node.Phase))
}
woc.addOutputsToLocalScope(prefix, node.Outputs, scope)
}
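// addOutputsToLocalScope adds a node's outputs (result, exit code, parameters and artifacts)
// to the scope under the given prefix.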
func (woc *wfOperationCtx) addOutputsToLocalScope(prefix string, outputs *wfv1.Outputs, scope *wfScope) {
if outputs == nil || scope == nil {
return
}
if prefix != "workflow" && outputs.Result != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.result", prefix), *outputs.Result)
}
if prefix != "workflow" && outputs.ExitCode != nil {
scope.addParamToScope(fmt.Sprintf("%s.exitCode", prefix), *outputs.ExitCode)
}
for _, param := range outputs.Parameters {
if param.Value != nil {
scope.addParamToScope(fmt.Sprintf("%s.outputs.parameters.%s", prefix, param.Name), param.Value.String())
}
}
for _, art := range outputs.Artifacts {
scope.addArtifactToScope(fmt.Sprintf("%s.outputs.artifacts.%s", prefix, art.Name), art)
}
}
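// addOutputsToGlobalScope exports any output parameters and artifacts that carry a globalName
// to the workflow-level (global) scope.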
func (woc *wfOperationCtx) addOutputsToGlobalScope(outputs *wfv1.Outputs) {
if outputs == nil {
return
}
for _, param := range outputs.Parameters {
woc.addParamToGlobalScope(param)
}
for _, art := range outputs.Artifacts {
woc.addArtifactToGlobalScope(art, nil)
}
}
// loopNodes is a node list which supports sorting by loop index
type loopNodes []wfv1.NodeStatus
func (n loopNodes) Len() int {
return len(n)
}
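// parseLoopIndex extracts the loop index from an expanded node display name of the form "name(index:value)".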
func parseLoopIndex(s string) int {
s = strings.SplitN(s, "(", 2)[1]
s = strings.SplitN(s, ":", 2)[0]
val, err := strconv.Atoi(s)
if err != nil {
panic(fmt.Sprintf("failed to parse '%s' as int: %v", s, err))
}
return val
}
func (n loopNodes) Less(i, j int) bool {
left := parseLoopIndex(n[i].DisplayName)
right := parseLoopIndex(n[j].DisplayName)
return left < right
}
func (n loopNodes) Swap(i, j int) {
n[i], n[j] = n[j], n[i]
}
// processAggregateNodeOutputs adds the aggregated outputs of a withItems/withParam template as a
// parameter in the form of a JSON list
func (woc *wfOperationCtx) processAggregateNodeOutputs(tmpl *wfv1.Template, scope *wfScope, prefix string, childNodes []wfv1.NodeStatus) error {
if len(childNodes) == 0 {
return nil
}
// need to sort the child node list so that the order of outputs is preserved
sort.Sort(loopNodes(childNodes))
paramList := make([]map[string]string, 0)
outputParamValueLists := make(map[string][]string)
resultsList := make([]wfv1.Item, 0)
for _, node := range childNodes {
if node.Outputs == nil {
continue
}
if len(node.Outputs.Parameters) > 0 {
param := make(map[string]string)
for _, p := range node.Outputs.Parameters {
param[p.Name] = p.Value.String()
outputParamValueList := outputParamValueLists[p.Name]
outputParamValueList = append(outputParamValueList, p.Value.String())
outputParamValueLists[p.Name] = outputParamValueList
}
paramList = append(paramList, param)
}
if node.Outputs.Result != nil {
// Support the case where item may be a map
var item wfv1.Item
err := json.Unmarshal([]byte(*node.Outputs.Result), &item)
if err != nil {
return err
}
resultsList = append(resultsList, item)
}
}
if tmpl.GetType() == wfv1.TemplateTypeScript || tmpl.GetType() == wfv1.TemplateTypeContainer {
resultsJSON, err := json.Marshal(resultsList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.result", prefix)
scope.addParamToScope(key, string(resultsJSON))
}
outputsJSON, err := json.Marshal(paramList)
if err != nil {
return err
}
key := fmt.Sprintf("%s.outputs.parameters", prefix)
scope.addParamToScope(key, string(outputsJSON))
// Adding per-output aggregated value placeholders
for outputName, valueList := range outputParamValueLists {
key = fmt.Sprintf("%s.outputs.parameters.%s", prefix, outputName)
valueListJSON, err := json.Marshal(valueList)
if err != nil {
return err
}
scope.addParamToScope(key, string(valueListJSON))
}
return nil
}
// addParamToGlobalScope exports any desired node outputs to the global scope, and adds it to the global outputs.
func (woc *wfOperationCtx) addParamToGlobalScope(param wfv1.Parameter) {
if param.GlobalName == "" {
return
}
index := -1
if woc.wf.Status.Outputs != nil {
for i, gParam := range woc.wf.Status.Outputs.Parameters {
if gParam.Name == param.GlobalName {
index = i
break
}
}
} else {
woc.wf.Status.Outputs = &wfv1.Outputs{}
}
paramName := fmt.Sprintf("workflow.outputs.parameters.%s", param.GlobalName)
woc.globalParams[paramName] = param.Value.String()
if index == -1 {
woc.log.Infof("setting %s: '%s'", paramName, param.Value)
gParam := wfv1.Parameter{Name: param.GlobalName, Value: param.Value}
woc.wf.Status.Outputs.Parameters = append(woc.wf.Status.Outputs.Parameters, gParam)
woc.updated = true
} else {
prevVal := *woc.wf.Status.Outputs.Parameters[index].Value
if prevVal != *param.Value {
woc.log.Infof("overwriting %s: '%s' -> '%s'", paramName, woc.wf.Status.Outputs.Parameters[index].Value, param.Value)
woc.wf.Status.Outputs.Parameters[index].Value = param.Value
woc.updated = true
}
}
}
// addArtifactToGlobalScope exports any desired node outputs to the global scope
// Optionally adds to a local scope if supplied
func (woc *wfOperationCtx) addArtifactToGlobalScope(art wfv1.Artifact, scope *wfScope) {
if art.GlobalName == "" {
return
}
globalArtName := fmt.Sprintf("workflow.outputs.artifacts.%s", art.GlobalName)
if woc.wf.Status.Outputs != nil {
for i, gArt := range woc.wf.Status.Outputs.Artifacts {
if gArt.Name == art.GlobalName {
// global output already exists. overwrite the value if different
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
if !reflect.DeepEqual(woc.wf.Status.Outputs.Artifacts[i], art) {
woc.wf.Status.Outputs.Artifacts[i] = art
if scope != nil {
scope.addArtifactToScope(globalArtName, art)
}
woc.log.Infof("overwriting %s: %v", globalArtName, art)
woc.updated = true
}
return
}
}
} else {
woc.wf.Status.Outputs = &wfv1.Outputs{}
}
// global output does not yet exist
art.Name = art.GlobalName
art.GlobalName = ""
art.Path = ""
woc.log.Infof("setting %s: %v", globalArtName, art)
woc.wf.Status.Outputs.Artifacts = append(woc.wf.Status.Outputs.Artifacts, art)
if scope != nil {
scope.addArtifactToScope(globalArtName, art)
}
woc.updated = true
}
// addChildNode adds a nodeID as a child to a parent
// parent and child are both node names
func (woc *wfOperationCtx) addChildNode(parent string, child string) {
parentID := woc.wf.NodeID(parent)
childID := woc.wf.NodeID(child)
node, ok := woc.wf.Status.Nodes[parentID]
if !ok {
panic(fmt.Sprintf("parent node %s not initialized", parent))
}
for _, nodeID := range node.Children {
if childID == nodeID {
// already exists
return
}
}
node.Children = append(node.Children, childID)
woc.wf.Status.Nodes[parentID] = node
woc.updated = true
}
// executeResource runs a kubectl command against a manifest
func (woc *wfOperationCtx) executeResource(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypePod, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
} else if !node.Pending() {
return node, nil
}
tmpl = tmpl.DeepCopy()
// Try to unmarshal the given manifest.
obj := unstructured.Unstructured{}
err := yaml.Unmarshal([]byte(tmpl.Resource.Manifest), &obj)
if err != nil {
return node, err
}
if tmpl.Resource.SetOwnerReference {
ownerReferences := obj.GetOwnerReferences()
obj.SetOwnerReferences(append(ownerReferences, *metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind))))
bytes, err := yaml.Marshal(obj.Object)
if err != nil {
return node, err
}
tmpl.Resource.Manifest = string(bytes)
}
mainCtr := woc.newExecContainer(common.MainContainerName, tmpl)
mainCtr.Command = []string{"argoexec", "resource", tmpl.Resource.Action}
_, err = woc.createWorkflowPod(nodeName, *mainCtr, tmpl, &createWorkflowPodOpts{onExitPod: opts.onExitTemplate, executionDeadline: opts.executionDeadline})
if err != nil {
return woc.requeueIfTransientErr(err, node.Name)
}
return node, err
}
func (woc *wfOperationCtx) executeSuspend(nodeName string, templateScope string, tmpl *wfv1.Template, orgTmpl wfv1.TemplateReferenceHolder, opts *executeTemplateOpts) (*wfv1.NodeStatus, error) {
node := woc.wf.GetNodeByName(nodeName)
if node == nil {
node = woc.initializeExecutableNode(nodeName, wfv1.NodeTypeSuspend, templateScope, tmpl, orgTmpl, opts.boundaryID, wfv1.NodePending)
}
woc.log.Infof("node %s suspended", nodeName)
// If there is either an active workflow deadline, or if this node is suspended with a duration, then the workflow
// will need to be requeued after a certain amount of time
var requeueTime *time.Time
if tmpl.Suspend.Duration != "" {
node := woc.wf.GetNodeByName(nodeName)
suspendDuration, err := parseStringToDuration(tmpl.Suspend.Duration)
if err != nil {
return node, err
}
suspendDeadline := node.StartedAt.Add(suspendDuration)
requeueTime = &suspendDeadline
if time.Now().UTC().After(suspendDeadline) {
// Suspension is expired, node can be resumed
woc.log.Infof("auto resuming node %s", nodeName)
_ = woc.markNodePhase(nodeName, wfv1.NodeSucceeded)
return node, nil
}
}
// workflowDeadline is the time when the workflow will be timed out, if any
if workflowDeadline := woc.getWorkflowDeadline(); workflowDeadline != nil {
// There is an active workflow deadline. If this node is suspended with a duration, choose the earlier time
// between the two, otherwise choose the deadline time.
if requeueTime == nil || workflowDeadline.Before(*requeueTime) {
requeueTime = workflowDeadline
}
}
if requeueTime != nil {
woc.requeue(time.Until(*requeueTime))
}
_ = woc.markNodePhase(nodeName, wfv1.NodeRunning)
return node, nil
}
func addRawOutputFields(node *wfv1.NodeStatus, tmpl *wfv1.Template) *wfv1.NodeStatus {
if tmpl.GetType() != wfv1.TemplateTypeSuspend || node.Type != wfv1.NodeTypeSuspend {
panic("addRawOutputFields should only be used for nodes and templates of type suspend")
}
for _, param := range tmpl.Outputs.Parameters {
if param.ValueFrom.Supplied != nil {
if node.Outputs == nil {
node.Outputs = &wfv1.Outputs{Parameters: []wfv1.Parameter{}}
}
node.Outputs.Parameters = append(node.Outputs.Parameters, param)
}
}
return node
}
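// parseStringToDuration converts a duration string into a time.Duration; values
// without units are treated as seconds.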
func parseStringToDuration(durationString string) (time.Duration, error) {
var suspendDuration time.Duration
// If no units are attached, treat as seconds
if val, err := strconv.Atoi(durationString); err == nil {
suspendDuration = time.Duration(val) * time.Second
} else if duration, err := time.ParseDuration(durationString); err == nil {
suspendDuration = duration
} else {
return 0, fmt.Errorf("unable to parse %s as a duration", durationString)
}
return suspendDuration, nil
}
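// processItem substitutes a single withItems/withParam item into the step or task
// template (replacing {{item}} and {{item.<key>}}) and returns the expanded node name.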
func processItem(fstTmpl *fasttemplate.Template, name string, index int, item wfv1.Item, obj interface{}) (string, error) {
replaceMap := make(map[string]string)
var newName string
switch item.GetType() {
case wfv1.String, wfv1.Number, wfv1.Bool:
replaceMap["item"] = fmt.Sprintf("%v", item)
newName = generateNodeName(name, index, item)
case wfv1.Map:
// Handle the case when withItems is a list of maps.
// vals holds stringified versions of the map items which are incorporated as part of the step name.
// For example if the item is: {"name": "jesse","group":"developer"}
// the vals would be: ["name:jesse", "group:developer"]
// This would eventually be part of the step name (group:developer,name:jesse)
vals := make([]string, 0)
mapVal := item.GetMapVal()
for itemKey, itemVal := range mapVal {
replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
vals = append(vals, fmt.Sprintf("%s:%v", itemKey, itemVal))
}
jsonByteVal, err := json.Marshal(mapVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(jsonByteVal)
// sort the values so that the name is deterministic
sort.Strings(vals)
newName = generateNodeName(name, index, strings.Join(vals, ","))
case wfv1.List:
listVal := item.GetListVal()
byteVal, err := json.Marshal(listVal)
if err != nil {
return "", errors.InternalWrapError(err)
}
replaceMap["item"] = string(byteVal)
newName = generateNodeName(name, index, listVal)
default:
return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, list, or map. received: %v", index, item)
}
newStepStr, err := common.Replace(fstTmpl, replaceMap, false)
if err != nil {
return "", err
}
err = json.Unmarshal([]byte(newStepStr), &obj)
if err != nil {
return "", errors.InternalWrapError(err)
}
return newName, nil
}
func generateNodeName(name string, index int, desc interface{}) string {
newName := fmt.Sprintf("%s(%d:%v)", name, index, desc)
if out := util.RecoverIndexFromNodeName(newName); out != index {
panic(fmt.Sprintf("unrecoverable digit in generateName; wanted '%d' and got '%d'", index, out))
}
return newName
}
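// expandSequence expands a withSequence definition into a list of items, counting
// up or down between start and end (or start+count-1 when only a count is given).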
func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) {
var start, end int
var err error
if seq.Start != nil {
start, err = strconv.Atoi(seq.Start.String())
if err != nil {
return nil, err
}
}
if seq.End != nil {
end, err = strconv.Atoi(seq.End.String())
if err != nil {
return nil, err
}
} else if seq.Count != nil {
count, err := strconv.Atoi(seq.Count.String())
if err != nil {
return nil, err
}
if count == 0 {
return []wfv1.Item{}, nil
}
end = start + count - 1
} else {
return nil, errors.InternalError("neither end nor count was specified in withSequence")
}
items := make([]wfv1.Item, 0)
format := "%d"
if seq.Format != "" {
format = seq.Format
}
if start <= end {
for i := start; i <= end; i++ {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
} else {
for i := start; i >= end; i-- {
item, err := wfv1.ParseItem(`"` + fmt.Sprintf(format, i) + `"`)
if err != nil {
return nil, err
}
items = append(items, item)
}
}
return items, nil
}
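// substituteParamsInVolumes resolves parameter references in the workflow's volume
// definitions.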
func (woc *wfOperationCtx) substituteParamsInVolumes(params map[string]string) error {
if woc.volumes == nil {
return nil
}
volumes := woc.volumes
volumesBytes, err := json.Marshal(volumes)
if err != nil {
return errors.InternalWrapError(err)
}
fstTmpl, err := fasttemplate.NewTemplate(string(volumesBytes), "{{", "}}")
if err != nil {
return fmt.Errorf("unable to parse argo varaible: %w", err)
}
newVolumesStr, err := common.Replace(fstTmpl, params, true)
if err != nil {
return err
}
var newVolumes []apiv1.Volume
err = json.Unmarshal([]byte(newVolumesStr), &newVolumes)
if err != nil {
return errors.InternalWrapError(err)
}
woc.volumes = newVolumes
return nil
}
// createTemplateContext creates a new template context.
func (woc *wfOperationCtx) createTemplateContext(scope wfv1.ResourceScope, resourceName string) (*templateresolution.Context, error) {
var clusterWorkflowTemplateGetter templateresolution.ClusterWorkflowTemplateGetter
if woc.controller.cwftmplInformer != nil {
clusterWorkflowTemplateGetter = woc.controller.cwftmplInformer.Lister()
} else {
clusterWorkflowTemplateGetter = &templateresolution.NullClusterWorkflowTemplateGetter{}
}
ctx := templateresolution.NewContext(woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace), clusterWorkflowTemplateGetter, woc.execWf, woc.wf)
switch scope {
case wfv1.ResourceScopeNamespaced:
return ctx.WithWorkflowTemplate(resourceName)
case wfv1.ResourceScopeCluster:
return ctx.WithClusterWorkflowTemplate(resourceName)
default:
return ctx, nil
}
}
func (woc *wfOperationCtx) runOnExitNode(templateRef, parentDisplayName, parentNodeName, boundaryID string, tmplCtx *templateresolution.Context) (bool, *wfv1.NodeStatus, error) {
if templateRef != "" && woc.wf.Spec.Shutdown.ShouldExecute(true) {
woc.log.Infof("Running OnExit handler: %s", templateRef)
onExitNodeName := common.GenerateOnExitNodeName(parentDisplayName)
onExitNode, err := woc.executeTemplate(onExitNodeName, &wfv1.WorkflowStep{Template: templateRef}, tmplCtx, woc.execWf.Spec.Arguments, &executeTemplateOpts{
boundaryID: boundaryID,
onExitTemplate: true,
})
woc.addChildNode(parentNodeName, onExitNodeName)
return true, onExitNode, err
}
return false, nil, nil
}
func (woc *wfOperationCtx) computeMetrics(metricList []*wfv1.Prometheus, localScope map[string]string, realTimeScope map[string]func() float64, realTimeOnly bool) {
for _, metricTmpl := range metricList {
// Don't process real time metrics after execution
if realTimeOnly && !metricTmpl.IsRealtime() {
continue
}
if metricTmpl.Help == "" {
woc.reportMetricEmissionError(fmt.Sprintf("metric '%s' must contain a help string under 'help: ' field", metricTmpl.Name))
continue
}
// Substitute parameters in non-value fields of the template to support variables in places such as labels,
		// name, and help. We do not substitute value fields (i.e. gauge, histogram, counter) here because they
// might be realtime ({{workflow.duration}} will not be substituted the same way if it's realtime or if it isn't).
metricTmplBytes, err := json.Marshal(metricTmpl)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (marshal): %s", metricTmpl.Name, err))
continue
}
fstTmpl, err := fasttemplate.NewTemplate(string(metricTmplBytes), "{{", "}}")
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err))
continue
}
replacedValue, err := common.Replace(fstTmpl, localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricTmpl.Name, err))
continue
}
var metricTmplSubstituted wfv1.Prometheus
err = json.Unmarshal([]byte(replacedValue), &metricTmplSubstituted)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s' (unmarshal): %s", metricTmpl.Name, err))
continue
}
// Only substitute non-value fields here. Value field substitution happens below
metricTmpl.Name = metricTmplSubstituted.Name
metricTmpl.Help = metricTmplSubstituted.Help
metricTmpl.Labels = metricTmplSubstituted.Labels
metricTmpl.When = metricTmplSubstituted.When
proceed, err := shouldExecute(metricTmpl.When)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to compute 'when' clause for metric '%s': %s", woc.wf.ObjectMeta.Name, err))
continue
}
if !proceed {
continue
}
if metricTmpl.IsRealtime() {
// Finally substitute value parameters
value := metricTmpl.Gauge.Value
if !(strings.HasPrefix(value, "{{") && strings.HasSuffix(value, "}}")) {
woc.reportMetricEmissionError("real time metrics can only be used with metric variables")
continue
}
value = strings.TrimSuffix(strings.TrimPrefix(value, "{{"), "}}")
valueFunc, ok := realTimeScope[value]
if !ok {
woc.reportMetricEmissionError(fmt.Sprintf("'%s' is not available as a real time metric", value))
continue
}
updatedMetric, err := metrics.ConstructRealTimeGaugeMetric(metricTmpl, valueFunc)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricTmpl.GetDesc(), string(woc.wf.UID), updatedMetric, true)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricTmpl.Name, err))
continue
}
continue
} else {
metricSpec := metricTmpl.DeepCopy()
// Finally substitute value parameters
fstTmpl, err = fasttemplate.NewTemplate(metricSpec.GetValueString(), "{{", "}}")
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to parse argo varaible for metric '%s': %s", metricTmpl.Name, err))
continue
}
replacedValue, err := common.Replace(fstTmpl, localScope, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("unable to substitute parameters for metric '%s': %s", metricSpec.Name, err))
continue
}
metricSpec.SetValueString(replacedValue)
metric := woc.controller.metrics.GetCustomMetric(metricSpec.GetDesc())
			// It is valid to pass a nil metric to ConstructOrUpdateMetric; in that case the metric will be created for us
updatedMetric, err := metrics.ConstructOrUpdateMetric(metric, metricSpec)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
err = woc.controller.metrics.UpsertCustomMetric(metricSpec.GetDesc(), string(woc.wf.UID), updatedMetric, false)
if err != nil {
woc.reportMetricEmissionError(fmt.Sprintf("could not construct metric '%s': %s", metricSpec.Name, err))
continue
}
continue
}
}
}
func (woc *wfOperationCtx) reportMetricEmissionError(errorString string) {
woc.wf.Status.Conditions.UpsertConditionMessage(
wfv1.Condition{
Status: metav1.ConditionTrue,
Type: wfv1.ConditionTypeMetricsError,
Message: errorString,
})
woc.updated = true
woc.log.Error(errorString)
}
func (woc *wfOperationCtx) createPDBResource() error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
pdb, err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Get(woc.wf.Name, metav1.GetOptions{})
if err != nil && !apierr.IsNotFound(err) {
return err
}
if pdb != nil && pdb.Name != "" {
return nil
}
pdbSpec := *woc.execWf.Spec.PodDisruptionBudget
if pdbSpec.Selector == nil {
pdbSpec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
}
}
newPDB := policyv1beta.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: woc.wf.Name,
Labels: map[string]string{common.LabelKeyWorkflow: woc.wf.Name},
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(woc.wf, wfv1.SchemeGroupVersion.WithKind(workflow.WorkflowKind)),
},
},
Spec: pdbSpec,
}
_, err = woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Create(&newPDB)
if err != nil {
return err
}
woc.log.Infof("Created PDB resource for workflow.")
woc.updated = true
return nil
}
func (woc *wfOperationCtx) deletePDBResource() error {
if woc.execWf.Spec.PodDisruptionBudget == nil {
return nil
}
err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
err := woc.controller.kubeclientset.PolicyV1beta1().PodDisruptionBudgets(woc.wf.Namespace).Delete(woc.wf.Name, &metav1.DeleteOptions{})
if err != nil && !apierr.IsNotFound(err) {
woc.log.WithField("err", err).Warn("Failed to delete PDB.")
if !errorsutil.IsTransientErr(err) {
return false, err
}
return false, nil
}
return true, nil
})
if err != nil {
woc.log.WithField("err", err).Error("Unable to delete PDB resource for workflow.")
return err
}
woc.log.Infof("Deleted PDB resource for workflow.")
return nil
}
// Check if the output of this node is referenced elsewhere in the Workflow. If so, make sure to include it during
// execution.
func (woc *wfOperationCtx) includeScriptOutput(nodeName, boundaryID string) (bool, error) {
if boundaryNode, ok := woc.wf.Status.Nodes[boundaryID]; ok {
tmplCtx, err := woc.createTemplateContext(boundaryNode.GetTemplateScope())
if err != nil {
return false, err
}
_, parentTemplate, templateStored, err := tmplCtx.ResolveTemplate(&boundaryNode)
if err != nil {
return false, err
}
// A new template was stored during resolution, persist it
if templateStored {
woc.updated = true
}
name := getStepOrDAGTaskName(nodeName)
return hasOutputResultRef(name, parentTemplate), nil
}
return false, nil
}
func (woc *wfOperationCtx) fetchWorkflowSpec() (wfv1.WorkflowSpecHolder, error) {
if woc.wf.Spec.WorkflowTemplateRef == nil {
return nil, fmt.Errorf("cannot fetch workflow spec without workflowTemplateRef")
}
var specHolder wfv1.WorkflowSpecHolder
var err error
	// Fetch the spec from the referenced cluster-scoped or namespaced workflow template.
if woc.wf.Spec.WorkflowTemplateRef.ClusterScope {
specHolder, err = woc.controller.cwftmplInformer.Lister().Get(woc.wf.Spec.WorkflowTemplateRef.Name)
} else {
specHolder, err = woc.controller.wftmplInformer.Lister().WorkflowTemplates(woc.wf.Namespace).Get(woc.wf.Spec.WorkflowTemplateRef.Name)
}
if err != nil {
return nil, err
}
return specHolder, nil
}
func (woc *wfOperationCtx) retryStrategy(tmpl *wfv1.Template) *wfv1.RetryStrategy {
if tmpl != nil && tmpl.RetryStrategy != nil {
return tmpl.RetryStrategy
}
return woc.execWf.Spec.RetryStrategy
}
func (woc *wfOperationCtx) setExecWorkflow() error {
if woc.wf.Spec.WorkflowTemplateRef != nil {
err := woc.setStoredWfSpec()
if err != nil {
return err
}
woc.execWf = &wfv1.Workflow{Spec: *woc.wf.Status.StoredWorkflowSpec.DeepCopy()}
woc.volumes = woc.execWf.Spec.DeepCopy().Volumes
} else if woc.controller.Config.WorkflowRestrictions.MustUseReference() {
return fmt.Errorf("workflows must use workflowTemplateRef to be executed when the controller is in reference mode")
} else {
err := woc.controller.setWorkflowDefaults(woc.wf)
if err != nil {
return err
}
woc.volumes = woc.wf.Spec.DeepCopy().Volumes
}
return nil
}
func (woc *wfOperationCtx) setStoredWfSpec() error {
wfDefault := woc.controller.Config.WorkflowDefaults
if wfDefault == nil {
wfDefault = &wfv1.Workflow{}
}
if woc.wf.Status.StoredWorkflowSpec == nil {
wftHolder, err := woc.fetchWorkflowSpec()
if err != nil {
return err
}
// Join WFT and WfDefault metadata to Workflow metadata.
wfutil.JoinWorkflowMetaData(&woc.wf.ObjectMeta, wftHolder.GetWorkflowMetadata(), &wfDefault.ObjectMeta)
		// Join the workflow spec, the workflow template spec, and the workflow defaults into the stored spec.
mergedWf, err := wfutil.JoinWorkflowSpec(&woc.wf.Spec, wftHolder.GetWorkflowSpec(), &wfDefault.Spec)
if err != nil {
return err
}
woc.wf.Status.StoredWorkflowSpec = &mergedWf.Spec
woc.updated = true
} else if woc.controller.Config.WorkflowRestrictions.MustNotChangeSpec() {
wftHolder, err := woc.fetchWorkflowSpec()
if err != nil {
return err
}
mergedWf, err := wfutil.JoinWorkflowSpec(&woc.wf.Spec, wftHolder.GetWorkflowSpec(), &wfDefault.Spec)
if err != nil {
return err
}
if mergedWf.Spec.String() != woc.wf.Status.StoredWorkflowSpec.String() {
return fmt.Errorf("workflowTemplateRef reference may not change during execution when the controller is in reference mode")
}
}
return nil
}
|
[
"\"INFORMER_WRITE_BACK\""
] |
[] |
[
"INFORMER_WRITE_BACK"
] |
[]
|
["INFORMER_WRITE_BACK"]
|
go
| 1 | 0 | |
test_validate_input_file.py
|
import json
import yaml
from jsonschema import validate
import os
configuration_file = os.environ['SC_ENABLER_CONF']
with open(configuration_file, 'r') as conf_file:
input_config = yaml.safe_load(conf_file)
with open("./input_schema_validator.json", 'r') as schema_file:
schema = json.load(schema_file)
def test_input_params():
validate(instance=input_config, schema=schema)
|
[] |
[] |
[
"SC_ENABLER_CONF"
] |
[]
|
["SC_ENABLER_CONF"]
|
python
| 1 | 0 | |
internal/lsp/cache/view.go
|
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements the caching layer for gopls.
package cache
import (
"context"
"encoding/json"
"fmt"
"go/build"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"sort"
"strings"
"sync"
"time"
"golang.org/x/mod/modfile"
"golang.org/x/mod/semver"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
"golang.org/x/tools/internal/lsp/source"
"golang.org/x/tools/internal/memoize"
"golang.org/x/tools/internal/span"
"golang.org/x/tools/internal/xcontext"
errors "golang.org/x/xerrors"
)
type View struct {
session *Session
id string
optionsMu sync.Mutex
options source.Options
// mu protects most mutable state of the view.
mu sync.Mutex
// baseCtx is the context handed to NewView. This is the parent of all
// background contexts created for this view.
baseCtx context.Context
// backgroundCtx is the current context used by background tasks initiated
// by the view.
backgroundCtx context.Context
// cancel is called when all action being performed by the current view
// should be stopped.
cancel context.CancelFunc
// name is the user visible name of this view.
name string
// folder is the folder with which this view was constructed.
folder span.URI
// root is the root directory of this view. If we are in GOPATH mode, this
// is just the folder. If we are in module mode, this is the module root.
root span.URI
// TODO: The modules and workspaceModule fields should probably be moved to
// the snapshot and invalidated on file changes.
// modules is the set of modules currently in this workspace.
modules map[span.URI]*moduleRoot
// workspaceModule is an in-memory representation of the go.mod file for
// the workspace module.
workspaceModule *modfile.File
// importsMu guards imports-related state, particularly the ProcessEnv.
importsMu sync.Mutex
// processEnv is the process env for this view.
// Some of its fields can be changed dynamically by modifications to
// the view's options. These fields are repopulated for every use.
// Note: this contains cached module and filesystem state.
//
// TODO(suzmue): the state cached in the process env is specific to each view,
// however, there is state that can be shared between views that is not currently
// cached, like the module cache.
processEnv *imports.ProcessEnv
cleanupProcessEnv func()
cacheRefreshDuration time.Duration
cacheRefreshTimer *time.Timer
cachedModFileIdentifier string
cachedBuildFlags []string
// keep track of files by uri and by basename, a single file may be mapped
// to multiple uris, and the same basename may map to multiple files
filesByURI map[span.URI]*fileBase
filesByBase map[string][]*fileBase
snapshotMu sync.Mutex
snapshot *snapshot
// initialized is closed when the view has been fully initialized. On
// initialization, the view's workspace packages are loaded. All of the
// fields below are set as part of initialization. If we failed to load, we
// only retry if the go.mod file changes, to avoid too many go/packages
// calls.
//
// When the view is created, initializeOnce is non-nil, initialized is
// open, and initCancelFirstAttempt can be used to terminate
// initialization. Once initialization completes, initializedErr may be set
// and initializeOnce becomes nil. If initializedErr is non-nil,
// initialization may be retried (depending on how files are changed). To
// indicate that initialization should be retried, initializeOnce will be
// set. The next time a caller requests workspace packages, the
// initialization will retry.
initialized chan struct{}
initCancelFirstAttempt context.CancelFunc
// initializationSema is used as a mutex to guard initializeOnce and
// initializedErr, which will be updated after each attempt to initialize
// the view. We use a channel instead of a mutex to avoid blocking when a
// context is canceled.
initializationSema chan struct{}
initializeOnce *sync.Once
initializedErr error
// True if the view is either in GOPATH, a module, or some other
// non go command build system.
hasValidBuildConfiguration bool
// The real go.mod and go.sum files that are attributed to a view.
modURI, sumURI span.URI
// The Go version in use: X in Go 1.X.
goversion int
// workspaceMode describes the way in which the view's workspace should be
// loaded.
workspaceMode workspaceMode
// hasGopackagesDriver is true if the user has a value set for the
// GOPACKAGESDRIVER environment variable or a gopackagesdriver binary on
// their machine.
hasGopackagesDriver bool
// `go env` variables that need to be tracked by gopls.
gocache, gomodcache, gopath, goprivate string
// goEnv is the `go env` output collected when a view is created.
// It includes the values of the environment variables above.
goEnv map[string]string
}
type workspaceMode int
const (
moduleMode workspaceMode = 1 << iota
// tempModfile indicates whether or not the -modfile flag should be used.
tempModfile
// usesWorkspaceModule indicates support for the experimental workspace module
// feature.
usesWorkspaceModule
)
type builtinPackageHandle struct {
handle *memoize.Handle
}
type builtinPackageData struct {
parsed *source.BuiltinPackage
err error
}
type moduleRoot struct {
rootURI span.URI
modURI, sumURI span.URI
}
// fileBase holds the common functionality for all files.
// It is intended to be embedded in the file implementations
type fileBase struct {
uris []span.URI
fname string
view *View
}
func (f *fileBase) URI() span.URI {
return f.uris[0]
}
func (f *fileBase) filename() string {
return f.fname
}
func (f *fileBase) addURI(uri span.URI) int {
f.uris = append(f.uris, uri)
return len(f.uris)
}
func (v *View) ID() string { return v.id }
func (v *View) ValidBuildConfiguration() bool {
return v.hasValidBuildConfiguration
}
func (v *View) ModFile() span.URI {
return v.modURI
}
// tempModFile creates a temporary go.mod file based on the contents of the
// given go.mod file. It is the caller's responsibility to clean up the files
// when they are done using them.
func tempModFile(modFh, sumFH source.FileHandle) (tmpURI span.URI, cleanup func(), err error) {
filenameHash := hashContents([]byte(modFh.URI().Filename()))
tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash))
if err != nil {
return "", nil, err
}
defer tmpMod.Close()
tmpURI = span.URIFromPath(tmpMod.Name())
tmpSumName := sumFilename(tmpURI)
content, err := modFh.Read()
if err != nil {
return "", nil, err
}
if _, err := tmpMod.Write(content); err != nil {
return "", nil, err
}
cleanup = func() {
_ = os.Remove(tmpSumName)
_ = os.Remove(tmpURI.Filename())
}
// Be careful to clean up if we return an error from this function.
defer func() {
if err != nil {
cleanup()
cleanup = nil
}
}()
// Create an analogous go.sum, if one exists.
if sumFH != nil {
sumContents, err := sumFH.Read()
if err != nil {
return "", cleanup, err
}
if err := ioutil.WriteFile(tmpSumName, sumContents, 0655); err != nil {
return "", cleanup, err
}
}
return tmpURI, cleanup, nil
}
func (v *View) Session() source.Session {
return v.session
}
// Name returns the user visible name of this view.
func (v *View) Name() string {
return v.name
}
// Folder returns the root of this view.
func (v *View) Folder() span.URI {
return v.folder
}
func (v *View) Options() source.Options {
v.optionsMu.Lock()
defer v.optionsMu.Unlock()
return v.options
}
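// minorOptionsChange reports whether the difference between the two option sets is
// benign enough that the view does not need to be rebuilt.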
func minorOptionsChange(a, b source.Options) bool {
// Check if any of the settings that modify our understanding of files have been changed
mapEnv := func(env []string) map[string]string {
m := make(map[string]string, len(env))
for _, x := range env {
split := strings.SplitN(x, "=", 2)
if len(split) != 2 {
continue
}
m[split[0]] = split[1]
}
return m
}
aEnv := mapEnv(a.Env)
bEnv := mapEnv(b.Env)
if !reflect.DeepEqual(aEnv, bEnv) {
return false
}
aBuildFlags := make([]string, len(a.BuildFlags))
bBuildFlags := make([]string, len(b.BuildFlags))
copy(aBuildFlags, a.BuildFlags)
copy(bBuildFlags, b.BuildFlags)
sort.Strings(aBuildFlags)
sort.Strings(bBuildFlags)
if !reflect.DeepEqual(aBuildFlags, bBuildFlags) {
return false
}
// the rest of the options are benign
return true
}
func (v *View) SetOptions(ctx context.Context, options source.Options) (source.View, error) {
// no need to rebuild the view if the options were not materially changed
v.optionsMu.Lock()
if minorOptionsChange(v.options, options) {
v.options = options
v.optionsMu.Unlock()
return v, nil
}
v.optionsMu.Unlock()
newView, err := v.session.updateView(ctx, v, options)
return newView, err
}
func (v *View) Rebuild(ctx context.Context) (source.Snapshot, func(), error) {
newView, err := v.session.updateView(ctx, v, v.Options())
if err != nil {
return nil, func() {}, err
}
snapshot, release := newView.Snapshot(ctx)
return snapshot, release, nil
}
func (v *View) WriteEnv(ctx context.Context, w io.Writer) error {
v.optionsMu.Lock()
env, buildFlags := v.envLocked()
v.optionsMu.Unlock()
fullEnv := make(map[string]string)
for k, v := range v.goEnv {
fullEnv[k] = v
}
for _, v := range env {
s := strings.SplitN(v, "=", 2)
if len(s) != 2 {
continue
}
if _, ok := fullEnv[s[0]]; ok {
fullEnv[s[0]] = s[1]
}
}
fmt.Fprintf(w, "go env for %v\n(root %s)\n(valid build configuration = %v)\n(build flags: %v)\n",
v.folder.Filename(), v.root.Filename(), v.hasValidBuildConfiguration, buildFlags)
for k, v := range fullEnv {
fmt.Fprintf(w, "%s=%s\n", k, v)
}
return nil
}
func (v *View) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error) error {
v.importsMu.Lock()
defer v.importsMu.Unlock()
// Use temporary go.mod files, but always go to disk for the contents.
// Rebuilding the cache is expensive, and we don't want to do it for
// transient changes.
var modFH, sumFH source.FileHandle
var modFileIdentifier string
var err error
if v.modURI != "" {
modFH, err = v.session.cache.getFile(ctx, v.modURI)
if err != nil {
return err
}
modFileIdentifier = modFH.FileIdentity().Hash
}
if v.sumURI != "" {
sumFH, err = v.session.cache.getFile(ctx, v.sumURI)
if err != nil {
return err
}
}
// v.goEnv is immutable -- changes make a new view. Options can change.
// We can't compare build flags directly because we may add -modfile.
v.optionsMu.Lock()
localPrefix := v.options.Local
currentBuildFlags := v.options.BuildFlags
changed := !reflect.DeepEqual(currentBuildFlags, v.cachedBuildFlags) ||
v.options.VerboseOutput != (v.processEnv.Logf != nil) ||
modFileIdentifier != v.cachedModFileIdentifier
v.optionsMu.Unlock()
// If anything relevant to imports has changed, clear caches and
// update the processEnv. Clearing caches blocks on any background
// scans.
if changed {
// As a special case, skip cleanup the first time -- we haven't fully
// initialized the environment yet and calling GetResolver will do
// unnecessary work and potentially mess up the go.mod file.
if v.cleanupProcessEnv != nil {
if resolver, err := v.processEnv.GetResolver(); err == nil {
resolver.(*imports.ModuleResolver).ClearForNewMod()
}
v.cleanupProcessEnv()
}
v.cachedModFileIdentifier = modFileIdentifier
v.cachedBuildFlags = currentBuildFlags
v.cleanupProcessEnv, err = v.populateProcessEnv(ctx, modFH, sumFH)
if err != nil {
return err
}
}
// Run the user function.
opts := &imports.Options{
// Defaults.
AllErrors: true,
Comments: true,
Fragment: true,
FormatOnly: false,
TabIndent: true,
TabWidth: 8,
Env: v.processEnv,
LocalPrefix: localPrefix,
}
if err := fn(opts); err != nil {
return err
}
if v.cacheRefreshTimer == nil {
// Don't refresh more than twice per minute.
delay := 30 * time.Second
// Don't spend more than a couple percent of the time refreshing.
if adaptive := 50 * v.cacheRefreshDuration; adaptive > delay {
delay = adaptive
}
v.cacheRefreshTimer = time.AfterFunc(delay, v.refreshProcessEnv)
}
return nil
}
func (v *View) refreshProcessEnv() {
start := time.Now()
v.importsMu.Lock()
env := v.processEnv
if resolver, err := v.processEnv.GetResolver(); err == nil {
resolver.ClearForNewScan()
}
v.importsMu.Unlock()
	// We don't have a request context handy for logging, so use the view's base context.
event.Log(v.baseCtx, "background imports cache refresh starting")
if err := imports.PrimeCache(context.Background(), env); err == nil {
event.Log(v.baseCtx, fmt.Sprintf("background refresh finished after %v", time.Since(start)))
} else {
event.Log(v.baseCtx, fmt.Sprintf("background refresh finished after %v", time.Since(start)), keys.Err.Of(err))
}
v.importsMu.Lock()
v.cacheRefreshDuration = time.Since(start)
v.cacheRefreshTimer = nil
v.importsMu.Unlock()
}
// populateProcessEnv sets the dynamically configurable fields for the view's
// process environment. It assumes that the caller is holding the view's importsMu.
func (v *View) populateProcessEnv(ctx context.Context, modFH, sumFH source.FileHandle) (cleanup func(), err error) {
cleanup = func() {}
pe := v.processEnv
v.optionsMu.Lock()
pe.BuildFlags = append([]string(nil), v.options.BuildFlags...)
if v.options.VerboseOutput {
pe.Logf = func(format string, args ...interface{}) {
event.Log(ctx, fmt.Sprintf(format, args...))
}
} else {
pe.Logf = nil
}
v.optionsMu.Unlock()
pe.Env = map[string]string{}
for k, v := range v.goEnv {
pe.Env[k] = v
}
modmod, err := v.needsModEqualsMod(ctx, modFH)
if err != nil {
return cleanup, err
}
if modmod {
// -mod isn't really a build flag, but we can get away with it given
// the set of commands that goimports wants to run.
pe.BuildFlags = append([]string{"-mod=mod"}, pe.BuildFlags...)
}
// Add -modfile to the build flags, if we are using it.
if v.workspaceMode&tempModfile != 0 && modFH != nil {
var tmpURI span.URI
tmpURI, cleanup, err = tempModFile(modFH, sumFH)
if err != nil {
return nil, err
}
pe.BuildFlags = append(pe.BuildFlags, fmt.Sprintf("-modfile=%s", tmpURI.Filename()))
}
return cleanup, nil
}
// envLocked returns the environment and build flags for the current view.
// It assumes that the caller is holding the view's optionsMu.
func (v *View) envLocked() ([]string, []string) {
env := append(os.Environ(), v.options.Env...)
buildFlags := append([]string{}, v.options.BuildFlags...)
return env, buildFlags
}
func (v *View) contains(uri span.URI) bool {
return strings.HasPrefix(string(uri), string(v.root))
}
func (v *View) mapFile(uri span.URI, f *fileBase) {
v.filesByURI[uri] = f
if f.addURI(uri) == 1 {
basename := basename(f.filename())
v.filesByBase[basename] = append(v.filesByBase[basename], f)
}
}
func basename(filename string) string {
return strings.ToLower(filepath.Base(filename))
}
func (v *View) relevantChange(c source.FileModification) bool {
// If the file is known to the view, the change is relevant.
known := v.knownFile(c.URI)
// If the file is not known to the view, and the change is only on-disk,
// we should not invalidate the snapshot. This is necessary because Emacs
// sends didChangeWatchedFiles events for temp files.
if !known && c.OnDisk && (c.Action == source.Change || c.Action == source.Delete) {
return false
}
return v.contains(c.URI) || known
}
func (v *View) knownFile(uri span.URI) bool {
v.mu.Lock()
defer v.mu.Unlock()
f, err := v.findFile(uri)
return f != nil && err == nil
}
// getFile returns a file for the given URI. It will always succeed because it
// adds the file to the managed set if needed.
func (v *View) getFile(uri span.URI) (*fileBase, error) {
v.mu.Lock()
defer v.mu.Unlock()
f, err := v.findFile(uri)
if err != nil {
return nil, err
} else if f != nil {
return f, nil
}
f = &fileBase{
view: v,
fname: uri.Filename(),
}
v.mapFile(uri, f)
return f, nil
}
// findFile checks the cache for any file matching the given uri.
//
// An error is only returned for an irreparable failure, for example, if the
// filename in question does not exist.
func (v *View) findFile(uri span.URI) (*fileBase, error) {
if f := v.filesByURI[uri]; f != nil {
// a perfect match
return f, nil
}
// no exact match stored, time to do some real work
// check for any files with the same basename
fname := uri.Filename()
basename := basename(fname)
if candidates := v.filesByBase[basename]; candidates != nil {
pathStat, err := os.Stat(fname)
if os.IsNotExist(err) {
return nil, err
}
if err != nil {
return nil, nil // the file may exist, return without an error
}
for _, c := range candidates {
if cStat, err := os.Stat(c.filename()); err == nil {
if os.SameFile(pathStat, cStat) {
// same file, map it
v.mapFile(uri, c)
return c, nil
}
}
}
}
// no file with a matching name was found, it wasn't in our cache
return nil, nil
}
func (v *View) Shutdown(ctx context.Context) {
v.session.removeView(ctx, v)
}
func (v *View) shutdown(ctx context.Context) {
// Cancel the initial workspace load if it is still running.
v.initCancelFirstAttempt()
v.mu.Lock()
if v.cancel != nil {
v.cancel()
v.cancel = nil
}
v.mu.Unlock()
v.snapshotMu.Lock()
go v.snapshot.generation.Destroy()
v.snapshotMu.Unlock()
}
func (v *View) BackgroundContext() context.Context {
v.mu.Lock()
defer v.mu.Unlock()
return v.backgroundCtx
}
func (v *View) IgnoredFile(uri span.URI) bool {
filename := uri.Filename()
var prefixes []string
if v.modURI == "" {
for _, entry := range filepath.SplitList(v.gopath) {
prefixes = append(prefixes, filepath.Join(entry, "src"))
}
} else {
mainMod := filepath.Dir(v.modURI.Filename())
prefixes = []string{mainMod, v.gomodcache}
}
for _, prefix := range prefixes {
if strings.HasPrefix(filename, prefix) {
return checkIgnored(filename[len(prefix):])
}
}
return false
}
// checkIgnored implements go list's exclusion rules. go help list:
// Directory and file names that begin with "." or "_" are ignored
// by the go tool, as are directories named "testdata".
func checkIgnored(suffix string) bool {
for _, component := range strings.Split(suffix, string(filepath.Separator)) {
if len(component) == 0 {
continue
}
if component[0] == '.' || component[0] == '_' || component == "testdata" {
return true
}
}
return false
}
func (v *View) Snapshot(ctx context.Context) (source.Snapshot, func()) {
s := v.getSnapshot()
return s, s.generation.Acquire(ctx)
}
func (v *View) getSnapshot() *snapshot {
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
return v.snapshot
}
func (v *View) initialize(ctx context.Context, s *snapshot, firstAttempt bool) {
select {
case <-ctx.Done():
return
case v.initializationSema <- struct{}{}:
}
defer func() {
<-v.initializationSema
}()
if v.initializeOnce == nil {
return
}
v.initializeOnce.Do(func() {
defer func() {
v.initializeOnce = nil
if firstAttempt {
close(v.initialized)
}
}()
// If we have multiple modules, we need to load them by paths.
var scopes []interface{}
if len(v.modules) > 0 {
// TODO(rstambler): Retry the initial workspace load for whichever
// modules we failed to load.
for _, mod := range v.modules {
fh, err := s.GetFile(ctx, mod.modURI)
if err != nil {
v.initializedErr = err
continue
}
parsed, err := s.ParseMod(ctx, fh)
if err != nil {
v.initializedErr = err
continue
}
path := parsed.File.Module.Mod.Path
scopes = append(scopes, moduleLoadScope(path))
}
} else {
scopes = append(scopes, viewLoadScope("LOAD_VIEW"))
}
err := s.load(ctx, append(scopes, packagePath("builtin"))...)
if ctx.Err() != nil {
return
}
if err != nil {
event.Error(ctx, "initial workspace load failed", err)
}
v.initializedErr = err
})
}
// AwaitInitialized waits until a view is initialized
func (v *View) AwaitInitialized(ctx context.Context) {
select {
case <-ctx.Done():
return
case <-v.initialized:
}
// We typically prefer to run something as intensive as the IWL without
// blocking. I'm not sure if there is a way to do that here.
v.initialize(ctx, v.getSnapshot(), false)
}
// invalidateContent invalidates the content of a Go file,
// including any position and type information that depends on it.
// It returns true if we were already tracking the given file, false otherwise.
func (v *View) invalidateContent(ctx context.Context, uris map[span.URI]source.VersionedFileHandle, forceReloadMetadata bool) (source.Snapshot, func()) {
// Detach the context so that content invalidation cannot be canceled.
ctx = xcontext.Detach(ctx)
// Cancel all still-running previous requests, since they would be
// operating on stale data.
v.cancelBackground()
// Do not clone a snapshot until its view has finished initializing.
v.AwaitInitialized(ctx)
// This should be the only time we hold the view's snapshot lock for any period of time.
v.snapshotMu.Lock()
defer v.snapshotMu.Unlock()
oldSnapshot := v.snapshot
v.snapshot = oldSnapshot.clone(ctx, uris, forceReloadMetadata)
go oldSnapshot.generation.Destroy()
return v.snapshot, v.snapshot.generation.Acquire(ctx)
}
func (v *View) cancelBackground() {
v.mu.Lock()
defer v.mu.Unlock()
if v.cancel == nil {
// this can happen during shutdown
return
}
v.cancel()
v.backgroundCtx, v.cancel = context.WithCancel(v.baseCtx)
}
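// maybeReinitialize marks the view to retry initialization if the previous attempt
// ended in an error.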
func (v *View) maybeReinitialize() {
v.initializationSema <- struct{}{}
defer func() {
<-v.initializationSema
}()
if v.initializedErr == nil {
return
}
var once sync.Once
v.initializeOnce = &once
}
func (v *View) setBuildInformation(ctx context.Context, options source.Options) error {
if err := checkPathCase(v.Folder().Filename()); err != nil {
return errors.Errorf("invalid workspace configuration: %w", err)
}
var err error
v.goversion, err = v.goVersion(ctx, v.Options().Env)
if err != nil {
return err
}
// Make sure to get the `go env` before continuing with initialization.
modFile, err := v.setGoEnv(ctx, options.Env)
if err != nil {
return err
}
if modFile != "" {
v.workspaceMode |= moduleMode
}
if modFile == os.DevNull {
return nil
}
v.modURI = span.URIFromPath(modFile)
// Set the sumURI, if the go.sum exists.
sumFilename := filepath.Join(filepath.Dir(modFile), "go.sum")
if stat, _ := os.Stat(sumFilename); stat != nil {
v.sumURI = span.URIFromPath(sumFilename)
}
if options.ExpandWorkspaceToModule && v.modURI != "" {
v.root = span.URIFromPath(filepath.Dir(v.modURI.Filename()))
}
// The user has disabled the use of the -modfile flag or has no go.mod file.
if !options.TempModfile || v.modURI == "" {
return nil
}
if v.goversion >= 14 {
v.workspaceMode |= tempModfile
}
return nil
}
// OS-specific path case check, for case-insensitive filesystems.
var checkPathCase = defaultCheckPathCase
func defaultCheckPathCase(path string) error {
return nil
}
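// setBuildConfiguration records whether the view has a usable build configuration:
// a GOPACKAGESDRIVER, a go.mod file, workspace modules, or a folder under GOPATH.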
func (v *View) setBuildConfiguration() (isValid bool) {
defer func() {
v.hasValidBuildConfiguration = isValid
}()
// Since we only really understand the `go` command, if the user has a
// different GOPACKAGESDRIVER, assume that their configuration is valid.
if v.hasGopackagesDriver {
return true
}
// Check if the user is working within a module or if we have found
// multiple modules in the workspace.
if v.modURI != "" {
return true
}
if len(v.modules) > 0 {
return true
}
	// The user may have multiple directories in their GOPATH.
// Check if the workspace is within any of them.
for _, gp := range filepath.SplitList(v.gopath) {
if isSubdirectory(filepath.Join(gp, "src"), v.folder.Filename()) {
return true
}
}
return false
}
func isSubdirectory(root, leaf string) bool {
rel, err := filepath.Rel(root, leaf)
return err == nil && !strings.HasPrefix(rel, "..")
}
// setGoEnv sets the view's various GO* values. It also returns the view's
// GOMOD value, which need not be cached.
func (v *View) setGoEnv(ctx context.Context, configEnv []string) (string, error) {
var gomod string
vars := map[string]*string{
"GOCACHE": &v.gocache,
"GOPATH": &v.gopath,
"GOPRIVATE": &v.goprivate,
"GOMODCACHE": &v.gomodcache,
"GOMOD": &gomod,
}
// We can save ~200 ms by requesting only the variables we care about.
args := append([]string{"-json"}, imports.RequiredGoEnvVars...)
for k := range vars {
args = append(args, k)
}
inv := gocommand.Invocation{
Verb: "env",
Args: args,
Env: configEnv,
WorkingDir: v.Folder().Filename(),
}
// Don't go through runGoCommand, as we don't need a temporary -modfile to
// run `go env`.
stdout, err := v.session.gocmdRunner.Run(ctx, inv)
if err != nil {
return "", err
}
if err := json.Unmarshal(stdout.Bytes(), &v.goEnv); err != nil {
return "", err
}
for key, ptr := range vars {
*ptr = v.goEnv[key]
}
// Old versions of Go don't have GOMODCACHE, so emulate it.
if v.gomodcache == "" && v.gopath != "" {
v.gomodcache = filepath.Join(filepath.SplitList(v.gopath)[0], "pkg/mod")
}
// The value of GOPACKAGESDRIVER is not returned through the go command.
gopackagesdriver := os.Getenv("GOPACKAGESDRIVER")
for _, s := range configEnv {
split := strings.SplitN(s, "=", 2)
if split[0] == "GOPACKAGESDRIVER" {
gopackagesdriver = split[1]
}
}
// A user may also have a gopackagesdriver binary on their machine, which
// works the same way as setting GOPACKAGESDRIVER.
tool, _ := exec.LookPath("gopackagesdriver")
v.hasGopackagesDriver = gopackagesdriver != "off" && (gopackagesdriver != "" || tool != "")
return gomod, nil
}
func (v *View) IsGoPrivatePath(target string) bool {
return globsMatchPath(v.goprivate, target)
}
// Copied from
// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/str/path.go;l=58;drc=2910c5b4a01a573ebc97744890a07c1a3122c67a
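// globsMatchPath reports whether any comma-separated glob in globs matches a
// path-element prefix of target (the matching used for GOPRIVATE-style variables).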
func globsMatchPath(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
if i := strings.Index(globs, ","); i >= 0 {
glob, globs = globs[:i], globs[i+1:]
} else {
glob, globs = globs, ""
}
if glob == "" {
continue
}
// A glob with N+1 path elements (N slashes) needs to be matched
// against the first N+1 path elements of target,
// which end just before the N+1'th slash.
n := strings.Count(glob, "/")
prefix := target
// Walk target, counting slashes, truncating at the N+1'th slash.
for i := 0; i < len(target); i++ {
if target[i] == '/' {
if n == 0 {
prefix = target[:i]
break
}
n--
}
}
if n > 0 {
// Not enough prefix elements.
continue
}
matched, _ := path.Match(glob, prefix)
if matched {
return true
}
}
return false
}
// goVersion returns the minor Go version in use (the X in go1.X), as reported by
// the go command's ReleaseTags.
func (v *View) goVersion(ctx context.Context, env []string) (int, error) {
// Check the go version by running "go list" with modules off.
// Borrowed from internal/imports/mod.go:620.
const format = `{{context.ReleaseTags}}`
inv := gocommand.Invocation{
Verb: "list",
Args: []string{"-e", "-f", format},
Env: append(env, "GO111MODULE=off"),
WorkingDir: v.root.Filename(),
}
stdoutBytes, err := v.session.gocmdRunner.Run(ctx, inv)
if err != nil {
return 0, err
}
stdout := stdoutBytes.String()
if len(stdout) < 3 {
return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
}
// Split up "[go1.1 go1.15]"
tags := strings.Fields(stdout[1 : len(stdout)-2])
for i := len(tags) - 1; i >= 0; i-- {
var version int
		if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
continue
}
return version, nil
}
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
func (v *View) needsModEqualsMod(ctx context.Context, modFH source.FileHandle) (bool, error) {
if v.goversion < 16 || v.workspaceMode&moduleMode == 0 {
return false, nil
}
matches := modFlagRegexp.FindStringSubmatch(v.goEnv["GOFLAGS"])
var modFlag string
if len(matches) != 0 {
modFlag = matches[1]
}
if modFlag != "" {
// Don't override an explicit '-mod=vendor' argument.
// We do want to override '-mod=readonly': it would break various module code lenses,
// and on 1.16 we know -modfile is available, so we won't mess with go.mod anyway.
return modFlag == "vendor", nil
}
// In workspace module mode, there may not be a go.mod file.
// TODO: Once vendor mode is designed, update to check if it's on, however that works.
if modFH == nil {
return true, nil
}
modBytes, err := modFH.Read()
if err != nil {
return false, err
}
modFile, err := modfile.Parse(modFH.URI().Filename(), modBytes, nil)
if err != nil {
return false, err
}
if fi, err := os.Stat(filepath.Join(filepath.Dir(v.modURI.Filename()), "vendor")); err != nil || !fi.IsDir() {
return true, nil
}
vendorEnabled := modFile.Go.Version != "" && semver.Compare("v"+modFile.Go.Version, "v1.14") >= 0
return !vendorEnabled, nil
}
|
[
"\"GOPACKAGESDRIVER\""
] |
[] |
[
"GOPACKAGESDRIVER"
] |
[]
|
["GOPACKAGESDRIVER"]
|
go
| 1 | 0 | |
models/unit_tests.go
|
// Copyright 2016 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"fmt"
"io/ioutil"
"math"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/setting"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
"github.com/stretchr/testify/assert"
"gopkg.in/testfixtures.v2"
"xorm.io/core"
)
// NonexistentID an ID that will never exist
const NonexistentID = int64(math.MaxInt64)
// giteaRoot a path to the gitea root
var giteaRoot string
func fatalTestError(fmtStr string, args ...interface{}) {
fmt.Fprintf(os.Stderr, fmtStr, args...)
os.Exit(1)
}
// MainTest is a reusable TestMain(..) function for unit tests that need to use a
// test database. Creates the test database, and sets necessary settings.
func MainTest(m *testing.M, pathToGiteaRoot string) {
var err error
giteaRoot = pathToGiteaRoot
fixturesDir := filepath.Join(pathToGiteaRoot, "models", "fixtures")
if err = createTestEngine(fixturesDir); err != nil {
fatalTestError("Error creating test engine: %v\n", err)
}
setting.AppURL = "https://try.gitea.io/"
setting.RunUser = "runuser"
setting.SSH.Port = 3000
setting.SSH.Domain = "try.gitea.io"
setting.UseSQLite3 = true
setting.RepoRootPath, err = ioutil.TempDir(os.TempDir(), "repos")
if err != nil {
fatalTestError("TempDir: %v\n", err)
}
setting.AppDataPath, err = ioutil.TempDir(os.TempDir(), "appdata")
if err != nil {
fatalTestError("TempDir: %v\n", err)
}
setting.AppWorkPath = pathToGiteaRoot
setting.StaticRootPath = pathToGiteaRoot
setting.GravatarSourceURL, err = url.Parse("https://secure.gravatar.com/avatar/")
if err != nil {
fatalTestError("url.Parse: %v\n", err)
}
exitStatus := m.Run()
if err = removeAllWithRetry(setting.RepoRootPath); err != nil {
fatalTestError("os.RemoveAll: %v\n", err)
}
if err = removeAllWithRetry(setting.AppDataPath); err != nil {
fatalTestError("os.RemoveAll: %v\n", err)
}
os.Exit(exitStatus)
}
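// createTestEngine creates an in-memory SQLite engine, syncs the model tables, and
// prepares the test fixtures from the given directory.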
func createTestEngine(fixturesDir string) error {
var err error
x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared")
if err != nil {
return err
}
x.SetMapper(core.GonicMapper{})
if err = x.StoreEngine("InnoDB").Sync2(tables...); err != nil {
return err
}
switch os.Getenv("GITEA_UNIT_TESTS_VERBOSE") {
case "true", "1":
x.ShowSQL(true)
}
return InitFixtures(&testfixtures.SQLite{}, fixturesDir)
}
func removeAllWithRetry(dir string) error {
var err error
for i := 0; i < 20; i++ {
err = os.RemoveAll(dir)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return err
}
// PrepareTestDatabase load test fixtures into test database
func PrepareTestDatabase() error {
return LoadFixtures()
}
// PrepareTestEnv prepares the environment for unit tests. Can only be called
// by tests that use the above MainTest(..) function.
func PrepareTestEnv(t testing.TB) {
assert.NoError(t, PrepareTestDatabase())
assert.NoError(t, removeAllWithRetry(setting.RepoRootPath))
metaPath := filepath.Join(giteaRoot, "integrations", "gitea-repositories-meta")
assert.NoError(t, com.CopyDir(metaPath, setting.RepoRootPath))
base.SetupGiteaRoot() // Makes sure GITEA_ROOT is set
}
type testCond struct {
query interface{}
args []interface{}
}
// Cond create a condition with arguments for a test
func Cond(query interface{}, args ...interface{}) interface{} {
return &testCond{query: query, args: args}
}
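// whereConditions applies the given conditions to the xorm session.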
func whereConditions(sess *xorm.Session, conditions []interface{}) {
for _, condition := range conditions {
switch cond := condition.(type) {
case *testCond:
sess.Where(cond.query, cond.args...)
default:
sess.Where(cond)
}
}
}
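// loadBeanIfExists loads the bean matching the given conditions and reports whether it exists.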
func loadBeanIfExists(bean interface{}, conditions ...interface{}) (bool, error) {
sess := x.NewSession()
defer sess.Close()
whereConditions(sess, conditions)
return sess.Get(bean)
}
// BeanExists for testing, check if a bean exists
func BeanExists(t testing.TB, bean interface{}, conditions ...interface{}) bool {
exists, err := loadBeanIfExists(bean, conditions...)
assert.NoError(t, err)
return exists
}
// AssertExistsAndLoadBean assert that a bean exists and load it from the test
// database
func AssertExistsAndLoadBean(t testing.TB, bean interface{}, conditions ...interface{}) interface{} {
exists, err := loadBeanIfExists(bean, conditions...)
assert.NoError(t, err)
assert.True(t, exists,
"Expected to find %+v (of type %T, with conditions %+v), but did not",
bean, bean, conditions)
return bean
}
// GetCount get the count of a bean
func GetCount(t testing.TB, bean interface{}, conditions ...interface{}) int {
sess := x.NewSession()
defer sess.Close()
whereConditions(sess, conditions)
count, err := sess.Count(bean)
assert.NoError(t, err)
return int(count)
}
// AssertNotExistsBean assert that a bean does not exist in the test database
func AssertNotExistsBean(t testing.TB, bean interface{}, conditions ...interface{}) {
exists, err := loadBeanIfExists(bean, conditions...)
assert.NoError(t, err)
assert.False(t, exists)
}
// AssertExistsIf asserts that a bean exists or does not exist, depending on
// what is expected.
func AssertExistsIf(t *testing.T, expected bool, bean interface{}, conditions ...interface{}) {
exists, err := loadBeanIfExists(bean, conditions...)
assert.NoError(t, err)
assert.Equal(t, expected, exists)
}
// AssertSuccessfulInsert asserts that the given beans are successfully inserted
func AssertSuccessfulInsert(t testing.TB, beans ...interface{}) {
_, err := x.Insert(beans...)
assert.NoError(t, err)
}
// AssertCount asserts the count of a bean
func AssertCount(t testing.TB, bean interface{}, expected interface{}) {
assert.EqualValues(t, expected, GetCount(t, bean))
}
// AssertInt64InRange asserts that value is in the range [low, high]
func AssertInt64InRange(t testing.TB, low, high, value int64) {
assert.True(t, value >= low && value <= high,
"Expected value in range [%d, %d], found %d", low, high, value)
}
|
[
"\"GITEA_UNIT_TESTS_VERBOSE\""
] |
[] |
[
"GITEA_UNIT_TESTS_VERBOSE"
] |
[]
|
["GITEA_UNIT_TESTS_VERBOSE"]
|
go
| 1 | 0 | |
featuretools/utils/cli_utils.py
|
import locale
import os
import platform
import struct
import subprocess
import sys
import pkg_resources
import featuretools
deps = ["numpy", "pandas", "tqdm", "PyYAML", "cloudpickle",
"dask", "distributed", "psutil", "Click",
"pip", "setuptools"]
def show_info():
subprocess.run(["featuretools", "info"])
def print_info():
print("Featuretools version: %s" % featuretools.__version__)
print("Featuretools installation directory: %s" % get_featuretools_root())
print_sys_info()
print_deps(deps)
def print_sys_info():
print("\nSYSTEM INFO")
print("-----------")
sys_info = get_sys_info()
for k, stat in sys_info:
print("{k}: {stat}".format(k=k, stat=stat))
def print_deps(dependencies):
print("\nINSTALLED VERSIONS")
print("------------------")
installed_packages = get_installed_packages()
package_dep = []
for x in dependencies:
# prevents uninstalled deps from being printed
if x in installed_packages:
package_dep.append((x, installed_packages[x]))
for k, stat in package_dep:
print("{k}: {stat}".format(k=k, stat=stat))
# Modified from here
# https://github.com/pandas-dev/pandas/blob/d9a037ec4ad0aab0f5bf2ad18a30554c38299e57/pandas/util/_print_versions.py#L11
def get_sys_info():
"Returns system information as a list of (key, value) tuples"
blob = []
try:
(sysname, nodename, release,
version, machine, processor) = platform.uname()
blob.extend([
("python", '.'.join(map(str, sys.version_info))),
("python-bits", struct.calcsize("P") * 8),
("OS", "{sysname}".format(sysname=sysname)),
("OS-release", "{release}".format(release=release)),
("machine", "{machine}".format(machine=machine)),
("processor", "{processor}".format(processor=processor)),
("byteorder", "{byteorder}".format(byteorder=sys.byteorder)),
("LC_ALL", "{lc}".format(lc=os.environ.get('LC_ALL', "None"))),
("LANG", "{lang}".format(lang=os.environ.get('LANG', "None"))),
("LOCALE", '.'.join(map(str, locale.getlocale()))),
])
except (KeyError, ValueError):
pass
return blob
def get_installed_packages():
installed_packages = {}
for d in pkg_resources.working_set:
installed_packages[d.project_name] = d.version
return installed_packages
def get_featuretools_root():
return os.path.dirname(featuretools.__file__)
|
[] |
[] |
[
"LC_ALL",
"LANG"
] |
[]
|
["LC_ALL", "LANG"]
|
python
| 2 | 0 | |
examples/pwr_run/checkpointing/socket_short/min_par/job18.py
|
"""
Trains a DenseNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_min_param/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
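# renaming the lock file back to pid.json presumably signals to the external scheduler that the PID update is complete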
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_min_param/' + job_name + '_' + str(current_epoch) + '.h5')
print('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
# send signal 'jobxx param xxxxx'
message = job_name + ' param ' + str(trainable_count)
send_signal.send(args.node, 10002, message)
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
paclair/__main__.py
|
# -*- coding: utf-8 -*-
import json
from paclair.exceptions import PluginNotFoundException
from paclair.logged_object import LoggedObject
from paclair.config_reader import ConfigReader
from paclair.exceptions import PaclairException, ConfigurationError
from yaml import YAMLError
import logging
import logging.handlers
import argparse
import os
import sys
DEFAULT_CONFIG_FILE = "/etc/paclair.conf"
class PaClair(LoggedObject):
"""
Class Main
"""
def __init__(self, config_file=None):
"""
Constructor
:param config_file: configuration file
"""
super().__init__()
self._config_reader = ConfigReader(config_file or DEFAULT_CONFIG_FILE)
try:
self._plugins = self._config_reader.read_plugins('Plugins')
except YAMLError:
raise ConfigurationError("Incorrect configuration file")
def _check_plugin(self, plugin):
"""
Check if plugin is available
:param plugin: plugin to check
:raises PluginNotFoundException: if not found
"""
if plugin not in self._plugins:
raise PluginNotFoundException("Plugin {} is unknown".format(plugin))
def analyse(self, plugin, name, delete=False, output=None):
"""
Analyse a layer
:param plugin: plugin's name
:param name: resource to analyse
:param delete: delete after analyse
:param output: change default output
:return: string
:raises ResourceNotFoundException: if layer not found
:raises ClairConnectionError: if an error occurs requesting Clair
"""
self._check_plugin(plugin)
self.logger.debug("Analysing {}".format(name))
result = self._plugins[plugin].analyse(name, output)
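# the plugin decides the shape of 'result': 'stats' is flattened to "key: value" lines, 'html' is presumably returned as-is, everything else is serialized to JSON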
if output == "stats":
result = '\n'.join(("{}: {}".format(k, v) for k, v in result.items()))
elif output != "html":
result = json.dumps(result)
if delete:
self.logger.debug("Deleting {}".format(name))
self._plugins[plugin].delete(name)
return result
def push(self, plugin, name):
"""
Push layer to Clair
:param plugin: plugin's name
:param name: resource to push
:return:
"""
self._check_plugin(plugin)
self.logger.debug("Push {} with plugin {}".format(name, plugin))
self._plugins[plugin].push(name)
def delete(self, plugin, name):
"""
Delete image from Clair
:param plugin: plugin's name
:param name: resource to delete
:raises ResourceNotFoundException: if layer not found
:raises ClairConnectionError: if an error occurs requesting Clair
"""
self._check_plugin(plugin)
self.logger.debug("Delete {} with plugin {}".format(name, plugin))
self._plugins[plugin].delete(name)
def main():
"""
Main
"""
# Create parser
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Debug mode", action="store_true")
parser.add_argument("--syslog", help="Log to syslog", action="store_true")
parser.add_argument("--conf", help="Conf file", action="store", default=DEFAULT_CONFIG_FILE)
parser.add_argument("plugin", help="Plugin to launch", action="store")
parser.add_argument("hosts", help="Image/hostname to analyse", nargs='+', action="store")
# Subparsers
subparsers = parser.add_subparsers(help="Command to launch", dest="subparser_name")
subparsers.add_parser("push", help="Push images/hosts to Clair")
subparsers.add_parser("delete", help="Delete images/hosts from Clair")
parser_analyse = subparsers.add_parser("analyse", help="Analyse images/hosts already pushed to Clair")
parser_analyse.add_argument("--output-format", help="Change default output format (default: json)",
choices=['stats', 'html'])
parser_analyse.add_argument("--output-report", help="Change report location (default: logger)",
choices=['file', 'term'])
parser_analyse.add_argument("--output-dir", help="Change output directory (default: current)", action="store",
default=".")
parser_analyse.add_argument("--delete", help="Delete after analyse", action="store_true")
# Parse args
args = parser.parse_args()
# Init logger
logger = logging.getLogger()
if args.debug:
logger.setLevel(level=logging.DEBUG)
else:
logger.setLevel(level=logging.INFO)
if args.syslog:
# Logger format
formatter = logging.Formatter(
'PACLAIR[{}]: ({}) %(levelname).1s %(message)s'.format(os.getpid(), os.getenv('USER')))
# Syslog Handler
syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
syslog_handler.setFormatter(formatter)
logger.addHandler(syslog_handler)
else:
logger.addHandler(logging.StreamHandler())
# Run
try:
paclair_object = PaClair(args.conf)
except (OSError, ConfigurationError) as error:
logger.error(error)
sys.exit(1)
for host in args.hosts:
try:
if args.subparser_name == "push":
paclair_object.push(args.plugin, host)
logger.info("Pushed {} to Clair.".format(host))
elif args.subparser_name == "delete":
paclair_object.delete(args.plugin, host)
logger.info("{} was deleted from Clair.".format(host))
elif args.subparser_name == "analyse":
result = paclair_object.analyse(args.plugin, host, args.delete, args.output_format)
# Report
if args.output_report == "term":
print(result)
elif args.output_report == "file":
filename = os.path.join(args.output_dir, '{}.{}'.format(host.replace('/', '_'), args.output_format
or 'json'))
try:
with open(filename, "w", encoding="utf-8") as report_file:
report_file.write(result)
except (OSError, IOError):
logger.error("Can't write in directory: {}".format(args.output_dir))
sys.exit(4)
else:
logger.info(result)
else:
parser.print_help()
except PluginNotFoundException as error:
logger.error("Can't find plugin {} in configuration file.".format(args.plugin))
logger.error(error)
sys.exit(2)
except PaclairException as error:
logger.error("Error treating {}".format(host))
logger.error(error)
sys.exit(3)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
python
| 1 | 0 | |
src/klima_price/main.py
|
import os
import json
from web3 import Web3
import discord
from discord.ext import commands, tasks
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
intents = discord.Intents.all()
intents.members = True
client = commands.Bot(intents=intents, help_command=None, command_prefix='&?')
# Initialize web3
project_id = os.environ['WEB3_INFURA_PROJECT_ID']
polygon_mainnet_endpoint = f'https://polygon-mainnet.infura.io/v3/{project_id}'
web3 = Web3(Web3.HTTPProvider(polygon_mainnet_endpoint))
assert web3.isConnected()
def lp_contract_info(sushi_address, basePrice=1):
address = Web3.toChecksumAddress(sushi_address)
abi = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Burn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount0Out","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1Out","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Swap","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint112","name":"reserve0","type":"uint112"},{"indexed":false,"internalType":"uint112","name":"reserve1","type":"uint112"}],"name":"Sync","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"MINIMUM_LIQUIDITY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"burn","outputs":[{"internalType":"uint256","name":"amount0","type":"uint256"},{"internalType":"uint256","name":"amount1","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],
"stateMutability":"view","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getReserves","outputs":[{"internalType":"uint112","name":"_reserve0","type":"uint112"},{"internalType":"uint112","name":"_reserve1","type":"uint112"},{"internalType":"uint32","name":"_blockTimestampLast","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_token0","type":"address"},{"internalType":"address","name":"_token1","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"kLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[{"internalType":"uint256","name":"liquidity","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"price0CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"price1CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"skim","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount0Out","type":"uint256"},{"internalType":"uint256","name":"amount1Out","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"swap","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"sync","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"token0","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"token1","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"interna
lType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}]') # noqa: E501
sushiLP = web3.eth.contract(address=address, abi=abi)
try:
Reserves = sushiLP.functions.getReserves().call()
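# getReserves() returns raw on-chain balances; the 1e12 / 1e9 scale factors below presumably compensate for the tokens' differing decimals (e.g. USDC uses 6 decimals, KLIMA 9)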
# usdc-bct
if sushi_address == '0x1e67124681b402064cd0abe8ed1b5c79d2e02f64':
tokenPrice = Reserves[0]*basePrice*1e12/Reserves[1]
# bct-klima
else:
tokenPrice = Reserves[0]*basePrice/(Reserves[1]*1e9)
return(tokenPrice)
except Exception:
pass
def klima_info():
address = Web3.toChecksumAddress("0x4e78011Ce80ee02d2c3e649Fb657E45898257815")
abi = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"previousTWAPEpochPeriod","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"newTWAPEpochPeriod","type":"uint256"}],"name":"TWAPEpochChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousTWAPOracle","type":"address"},{"indexed":true,"internalType":"address","name":"newTWAPOracle","type":"address"}],"name":"TWAPOracleChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"newTWAPSource","type":"address"}],"name":"TWAPSourceAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"removedTWAPSource","type":"address"}],"name":"TWAPSourceRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"_burnFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newTWAPSourceDexPool_","type":"address"}],"name":"addTWAPSource","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"burnFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"newTWAPEpochPeriod_","type":"uint256
"}],"name":"changeTWAPEpochPeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newTWAPOracle_","type":"address"}],"name":"changeTWAPOracle","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"subtractedValue","type":"uint256"}],"name":"decreaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"addedValue","type":"uint256"}],"name":"increaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"mint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"twapSourceToRemove_","type":"address"}],"name":"removeTWAPSource","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"renounceOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"vault_","type":"address"}],"name":"setVault","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner_","type":"address"}]
,"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"twapEpochPeriod","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"twapOracle","outputs":[{"internalType":"contract ITWAPOracle","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"vault","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]') # noqa: E501
klima_contract = web3.eth.contract(address=address, abi=abi)
try:
total_supply = klima_contract.functions.totalSupply().call()
return(total_supply/1e9)
except Exception:
pass
def get_info():
bct_price = lp_contract_info(sushi_address='0x1e67124681b402064cd0abe8ed1b5c79d2e02f64')
klima_price = lp_contract_info(sushi_address='0x9803c7ae526049210a1725f7487af26fe2c24614', basePrice=bct_price) # noqa: E501
supply = klima_info()
return(klima_price, supply)
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
if not update_info.is_running():
update_info.start()
@tasks.loop(seconds=300)
async def update_info():
price, supply = get_info()
if price is not None:
print(f'${price:,.2f} KLIMA')
print(f'Marketcap: ${price*supply/1e6:,.1f}M')
for guild in client.guilds:
guser = guild.get_member(client.user.id)
await guser.edit(nick=f'${price:,.2f} KLIMA')
if supply is not None:
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=f'Marketcap: ${price*supply/1e6:,.1f}M')) # noqa: E501
client.run(BOT_TOKEN)
|
[] |
[] |
[
"WEB3_INFURA_PROJECT_ID",
"DISCORD_BOT_TOKEN"
] |
[]
|
["WEB3_INFURA_PROJECT_ID", "DISCORD_BOT_TOKEN"]
|
python
| 2 | 0 | |
bob/git_promotions.py
|
import asyncio
import datetime
import os
import re
import pytz
import database
import main
from message_handler import promote
def broadcast_and_promote(updater):
bob_db_object = database.get_the_bob()
broadcast_message = os.getenv("COMMIT_MESSAGE")
loop = asyncio.get_event_loop()
if broadcast_message != bob_db_object.latest_startup_broadcast_message and broadcast_message != "":
# TODO: Make this a task
loop.run_until_complete(main.broadcast(updater.bot, broadcast_message))
bob_db_object.latest_startup_broadcast_message = broadcast_message
promote_committer_or_find_out_who_he_is(updater)
else:
loop.run_until_complete(main.broadcast(updater.bot, "Olin vain hiljaa hetken. "))
bob_db_object.save()
def promote_committer_or_find_out_who_he_is(updater):
commit_author_email, commit_author_name, git_user = get_git_user_and_commit_info()
if git_user.tg_user is not None:
promote_or_praise(git_user, updater.bot)
else:
reply_message = "Git käyttäjä " + str(commit_author_name) + " " + str(commit_author_email) + \
" ei ole minulle tuttu. Onko hän joku tästä ryhmästä?"
asyncio.run(main.broadcast(updater.bot, reply_message))
def get_git_user_and_commit_info():
commit_author_name = os.getenv("COMMIT_AUTHOR_NAME", "You should not see this")
commit_author_email = os.getenv("COMMIT_AUTHOR_EMAIL", "You should not see this")
git_user = database.get_git_user(commit_author_name, commit_author_email)
return commit_author_email, commit_author_name, git_user
def promote_or_praise(git_user, bot):
now = datetime.datetime.now(pytz.timezone('Europe/Helsinki'))
tg_user = database.get_telegram_user(user_id=git_user.tg_user.id)
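# promote at most roughly once a week: only if the last commit-triggered promotion is missing or more than 6 days old; otherwise just thank the committer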
if tg_user.latest_promotion_from_git_commit is None or \
tg_user.latest_promotion_from_git_commit < now.date() - datetime.timedelta(days=6):
committer_chat_memberships = database.get_chat_memberships_for_user(tg_user=git_user.tg_user)
for membership in committer_chat_memberships:
promote(membership)
asyncio.run(main.broadcast(bot, str(git_user.tg_user) + " ansaitsi ylennyksen ahkeralla työllä. "))
tg_user.latest_promotion_from_git_commit = now.date()
tg_user.save()
else:
# It has not been a week yet since the last promotion
asyncio.run(main.broadcast(bot, "Kiitos " + str(git_user.tg_user) + ", hyvää työtä!"))
def process_entities(update):
global_admin = database.get_global_admin()
if global_admin is not None:
if update.effective_user.id == global_admin.id:
for message_entity in update.message.entities:
process_entity(message_entity, update)
else:
update.message.reply_text("Et oo vissiin global_admin? ")
else:
update.message.reply_text("Globaalia adminia ei ole asetettu.")
def process_entity(message_entity, update):
commit_author_email, commit_author_name, git_user = get_git_user_and_commit_info()
if message_entity.type == "text_mention":
user = database.get_telegram_user(message_entity.user.id)
git_user.tg_user = user
elif message_entity.type == "mention":
username = re.search('@(.*)', update.message.text)
telegram_users = database.get_telegram_user_by_name(str(username.group(1)).strip())
if telegram_users.count() > 0:
git_user.tg_user = telegram_users[0]
else:
update.message.reply_text("En löytänyt tietokannastani ketään tuon nimistä. ")
git_user.save()
promote_or_praise(git_user, update.message.bot)
|
[] |
[] |
[
"COMMIT_AUTHOR_NAME",
"COMMIT_AUTHOR_EMAIL",
"COMMIT_MESSAGE"
] |
[]
|
["COMMIT_AUTHOR_NAME", "COMMIT_AUTHOR_EMAIL", "COMMIT_MESSAGE"]
|
python
| 3 | 0 | |
integration/helper_test.go
|
package integration
import (
"bytes"
"compress/gzip"
"fmt"
"github.com/pierrec/lz4/v4"
"io/ioutil"
"math"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/greenplum-db/gp-common-go-libs/operating"
"github.com/klauspost/compress/zstd"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
testDir = "/tmp/helper_test/20180101/20180101010101"
pluginDir = "/tmp/plugin_dest/20180101/20180101010101"
tocFile = fmt.Sprintf("%s/test_toc.yaml", testDir)
oidFile = fmt.Sprintf("%s/test_oids", testDir)
pipeFile = fmt.Sprintf("%s/test_pipe", testDir)
dataFileFullPath = filepath.Join(testDir, "test_data")
pluginBackupPath = filepath.Join(pluginDir, "test_data")
errorFile = fmt.Sprintf("%s_error", pipeFile)
pluginConfigPath = fmt.Sprintf("%s/src/github.com/greenplum-db/gpbackup/plugins/example_plugin_config.yaml", os.Getenv("GOPATH"))
)
const (
defaultData = "here is some data\n"
expectedData = `here is some data
here is some data
here is some data
`
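// each "here is some data\n" line is 18 bytes, which is why the TOC below advances in 18-byte steps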
expectedTOC = `dataentries:
1:
startbyte: 0
endbyte: 18
2:
startbyte: 18
endbyte: 36
3:
startbyte: 36
endbyte: 54
`
)
func gpbackupHelper(helperPath string, args ...string) *exec.Cmd {
args = append([]string{"--toc-file", tocFile, "--oid-file", oidFile, "--pipe-file", pipeFile, "--content", "1"}, args...)
command := exec.Command(helperPath, args...)
err := command.Start()
Expect(err).ToNot(HaveOccurred())
return command
}
func buildAndInstallBinaries() string {
_ = os.Chdir("..")
command := exec.Command("make", "build")
output, err := command.CombinedOutput()
if err != nil {
fmt.Printf("%s", output)
Fail(fmt.Sprintf("%v", err))
}
_ = os.Chdir("integration")
binDir := fmt.Sprintf("%s/bin", operating.System.Getenv("GOPATH"))
return fmt.Sprintf("%s/gpbackup_helper", binDir)
}
var _ = Describe("gpbackup_helper end to end integration tests", func() {
BeforeEach(func() {
err := os.RemoveAll(testDir)
Expect(err).ToNot(HaveOccurred())
err = os.MkdirAll(testDir, 0777)
Expect(err).ToNot(HaveOccurred())
err = os.RemoveAll(pluginDir)
Expect(err).ToNot(HaveOccurred())
err = os.MkdirAll(pluginDir, 0777)
Expect(err).ToNot(HaveOccurred())
err = syscall.Mkfifo(fmt.Sprintf("%s_%d", pipeFile, 1), 0777)
if err != nil {
Fail(fmt.Sprintf("%v", err))
}
})
Context("backup tests", func() {
BeforeEach(func() {
f, _ := os.Create(oidFile)
_, _ = f.WriteString("1\n2\n3\n")
})
It("runs backup gpbackup_helper without compression", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-level", "0", "--data-file", dataFileFullPath)
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifacts(false)
})
It("runs backup gpbackup_helper with data exceeding pipe buffer size", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-level", "0", "--data-file", dataFileFullPath)
writeToPipes(strings.Repeat("a", int(math.Pow(2, 17))))
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
})
It("runs backup gpbackup_helper with gzip compression", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "gzip", "--compression-level", "1", "--data-file", dataFileFullPath+".gz")
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("gzip", false)
})
It("runs backup gpbackup_helper with zstd compression", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "zstd", "--compression-level", "1", "--data-file", dataFileFullPath+".zst")
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("zstd", false)
})
It("runs backup gpbackup_helper with lz4 compression", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "lz4", "--compression-level", "1", "--data-file", dataFileFullPath+".lz4")
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("lz4", false)
})
It("runs backup gpbackup_helper without compression with plugin", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-level", "0", "--data-file", dataFileFullPath, "--plugin-config", pluginConfigPath)
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifacts(true)
})
It("runs backup gpbackup_helper with gzip compression with plugin", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "gzip", "--compression-level", "1", "--data-file", dataFileFullPath+".gz", "--plugin-config", pluginConfigPath)
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("gzip", true)
})
It("runs backup gpbackup_helper with zstd compression with plugin", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "zstd", "--compression-level", "1", "--data-file", dataFileFullPath+".zst", "--plugin-config", pluginConfigPath)
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("zstd", true)
})
It("runs backup gpbackup_helper with lz4 compression with plugin", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-type", "lz4", "--compression-level", "1", "--data-file", dataFileFullPath+".lz4", "--plugin-config", pluginConfigPath)
writeToPipes(defaultData)
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertBackupArtifactsWithCompression("lz4", true)
})
It("Generates error file when backup agent interrupted", func() {
helperCmd := gpbackupHelper(gpbackupHelperPath, "--backup-agent", "--compression-level", "0", "--data-file", dataFileFullPath)
waitForPipeCreation()
err := helperCmd.Process.Signal(os.Interrupt)
Expect(err).ToNot(HaveOccurred())
err = helperCmd.Wait()
Expect(err).To(HaveOccurred())
assertErrorsHandled()
})
})
Context("restore tests", func() {
It("runs restore gpbackup_helper without compression", func() {
setupRestoreFiles("", false)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath)
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with gzip compression", func() {
setupRestoreFiles("gzip", false)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".gz")
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with zstd compression", func() {
setupRestoreFiles("zstd", false)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".zst")
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with lz4 compression", func() {
setupRestoreFiles("lz4", false)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".lz4")
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper without compression with plugin", func() {
setupRestoreFiles("", true)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath, "--plugin-config", pluginConfigPath)
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with gzip compression with plugin", func() {
setupRestoreFiles("gzip", true)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".gz", "--plugin-config", pluginConfigPath)
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with zstd compression with plugin", func() {
setupRestoreFiles("zstd", true)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".zst", "--plugin-config", pluginConfigPath)
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("runs restore gpbackup_helper with lz4 compression with plugin", func() {
setupRestoreFiles("lz4", true)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".lz4", "--plugin-config", pluginConfigPath)
for _, i := range []int{1, 3} {
contents, _ := ioutil.ReadFile(fmt.Sprintf("%s_%d", pipeFile, i))
Expect(string(contents)).To(Equal("here is some data\n"))
}
err := helperCmd.Wait()
printHelperLogOnError(err)
Expect(err).ToNot(HaveOccurred())
assertNoErrors()
})
It("Generates error file when restore agent interrupted", func() {
setupRestoreFiles("gzip", false)
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".gz")
waitForPipeCreation()
err := helperCmd.Process.Signal(os.Interrupt)
Expect(err).ToNot(HaveOccurred())
err = helperCmd.Wait()
Expect(err).To(HaveOccurred())
assertErrorsHandled()
})
It("Continues restore process when encountering an error with flag --on-error-continue", func() {
// Write data file
dataFile := dataFileFullPath
f, _ := os.Create(dataFile + ".gz")
gzipf := gzip.NewWriter(f)
// Named pipes can buffer, so we need to write more than the buffer size to trigger flush error
customData := "here is some data\n"
dataLength := 128*1024 + 1
customData += strings.Repeat("a", dataLength)
customData += "here is some data\n"
_, _ = gzipf.Write([]byte(customData))
_ = gzipf.Close()
// Write oid file
fOid, _ := os.Create(oidFile)
_, _ = fOid.WriteString("1\n2\n3\n")
defer func() {
_ = os.Remove(oidFile)
}()
// Write custom TOC
customTOC := fmt.Sprintf(`dataentries:
1:
startbyte: 0
endbyte: 18
2:
startbyte: 18
endbyte: %[1]d
3:
startbyte: %[1]d
endbyte: %d
`, dataLength+18, dataLength+18+18)
fToc, _ := os.Create(tocFile)
_, _ = fToc.WriteString(customTOC)
defer func() {
_ = os.Remove(tocFile)
}()
helperCmd := gpbackupHelper(gpbackupHelperPath, "--restore-agent", "--data-file", dataFileFullPath+".gz", "--on-error-continue")
for k, v := range []int{1, 2, 3} {
currentPipe := fmt.Sprintf("%s_%d", pipeFile, v)
if k == 1 {
// Do not read from the pipe to cause data load error on the helper by interrupting the write.
file, errOpen := os.Open(currentPipe)
Expect(errOpen).ToNot(HaveOccurred())
errClose := file.Close()
Expect(errClose).ToNot(HaveOccurred())
} else {
contents, err := ioutil.ReadFile(currentPipe)
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(Equal("here is some data\n"))
}
}
// Block here until gpbackup_helper finishes (cleaning up pipes)
_ = helperCmd.Wait()
for _, i := range []int{1, 2, 3} {
currentPipe := fmt.Sprintf("%s_%d", pipeFile, i)
Expect(currentPipe).ToNot(BeAnExistingFile())
}
// Check that an error file was created
Expect(errorFile).To(BeAnExistingFile())
})
})
})
func setupRestoreFiles(compressionType string, withPlugin bool) {
dataFile := dataFileFullPath
if withPlugin {
dataFile = pluginBackupPath
}
f, _ := os.Create(oidFile)
_, _ = f.WriteString("1\n3\n")
if compressionType == "gzip" {
f, _ := os.Create(dataFile + ".gz")
defer f.Close()
gzipf := gzip.NewWriter(f)
defer gzipf.Close()
_, _ = gzipf.Write([]byte(expectedData))
} else if compressionType == "zstd" {
f, _ := os.Create(dataFile + ".zst")
defer f.Close()
zstdf, _ := zstd.NewWriter(f)
defer zstdf.Close()
_, _ = zstdf.Write([]byte(expectedData))
} else if compressionType == "lz4" {
f, _ := os.Create(dataFile + ".lz4")
defer f.Close()
lz4f := lz4.NewWriter(f)
defer lz4f.Close()
_, _ = lz4f.Write([]byte(expectedData))
} else {
f, _ := os.Create(dataFile)
_, _ = f.WriteString(expectedData)
}
f, _ = os.Create(tocFile)
_, _ = f.WriteString(expectedTOC)
}
func assertNoErrors() {
Expect(errorFile).To(Not(BeARegularFile()))
pipes, err := filepath.Glob(pipeFile + "_[1-9]*")
Expect(err).ToNot(HaveOccurred())
Expect(pipes).To(BeEmpty())
}
func assertErrorsHandled() {
Expect(errorFile).To(BeARegularFile())
pipes, err := filepath.Glob(pipeFile + "_[1-9]*")
Expect(err).ToNot(HaveOccurred())
Expect(pipes).To(BeEmpty())
}
func assertBackupArtifacts(withPlugin bool) {
var contents []byte
var err error
dataFile := dataFileFullPath
if withPlugin {
dataFile = pluginBackupPath
}
contents, err = ioutil.ReadFile(dataFile)
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(Equal(expectedData))
contents, err = ioutil.ReadFile(tocFile)
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(Equal(expectedTOC))
assertNoErrors()
}
func assertBackupArtifactsWithCompression(compressionType string, withPlugin bool) {
var contents []byte
var err error
dataFile := dataFileFullPath
if withPlugin {
dataFile = pluginBackupPath
}
if compressionType == "gzip" {
contents, err = ioutil.ReadFile(dataFile + ".gz")
} else if compressionType == "zstd" {
contents, err = ioutil.ReadFile(dataFile + ".zst")
} else if compressionType == "lz4" {
contents, err = ioutil.ReadFile(dataFile + ".lz4")
} else {
Fail("unknown compression type " + compressionType)
}
Expect(err).ToNot(HaveOccurred())
if compressionType == "gzip" {
r, _ := gzip.NewReader(bytes.NewReader(contents))
contents, _ = ioutil.ReadAll(r)
} else if compressionType == "zstd" {
r, _ := zstd.NewReader(bytes.NewReader(contents))
contents, _ = ioutil.ReadAll(r)
} else if compressionType == "lz4" {
r := lz4.NewReader(bytes.NewReader(contents))
contents, _ = ioutil.ReadAll(r)
} else {
Fail("unknown compression type " + compressionType)
}
Expect(string(contents)).To(Equal(expectedData))
contents, err = ioutil.ReadFile(tocFile)
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(Equal(expectedTOC))
assertNoErrors()
}
func printHelperLogOnError(helperErr error) {
if helperErr != nil {
homeDir := os.Getenv("HOME")
helperFiles, _ := filepath.Glob(filepath.Join(homeDir, "gpAdminLogs/gpbackup_helper_*"))
command := exec.Command("tail", "-n 20", helperFiles[len(helperFiles)-1])
output, _ := command.CombinedOutput()
fmt.Println(string(output))
}
}
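// writeToPipes stages the payload in a temporary file and pushes it into each named pipe via an external cat invocation.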
func writeToPipes(data string) {
for i := 1; i <= 3; i++ {
currentPipe := fmt.Sprintf("%s_%d", pipeFile, i)
_, err := os.Stat(currentPipe)
if err != nil {
Fail(fmt.Sprintf("%v", err))
}
f, _ := os.Create("/tmp/tmpdata.txt")
_, _ = f.WriteString(data)
output, err := exec.Command("bash", "-c", fmt.Sprintf("cat %s > %s", "/tmp/tmpdata.txt", currentPipe)).CombinedOutput()
_ = f.Close()
_ = os.Remove("/tmp/tmpdata.txt")
if err != nil {
fmt.Printf("%s", output)
Fail(fmt.Sprintf("%v", err))
}
}
}
func waitForPipeCreation() {
// wait up to 5 seconds for two pipe files to have been created
tries := 0
for tries < 1000 {
pipes, err := filepath.Glob(pipeFile + "_[1-9]*")
Expect(err).ToNot(HaveOccurred())
if len(pipes) > 1 {
return
}
tries += 1
time.Sleep(5 * time.Millisecond)
}
}
|
[
"\"GOPATH\"",
"\"HOME\""
] |
[] |
[
"GOPATH",
"HOME"
] |
[]
|
["GOPATH", "HOME"]
|
go
| 2 | 0 | |
vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d_transpose operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
Workload = namedtuple(
"Conv2DTransposeWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
"o_hpad",
"o_wpad",
],
)
# Get batch info from env
env = vta.get_env()
# DCGAN workloads
dcgan_wklds = [
# dcgan
("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
"""Unlike topi's current clip, put min and max into two stages."""
const_min = tvm.tir.const(a_min, x.dtype)
const_max = tvm.tir.const(a_max, x.dtype)
x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
return x
# Helper function to get factors
def _find_factors(n):
factors = []
for f in range(1, n + 1):
if n % f == 0:
factors.append(f)
return factors
def run_conv2d_transpose(
env, remote, wl, target, check_correctness=True, print_ir=False, samples=4
):
# Workload assertions
assert wl.hpad == wl.wpad
# Perform packing only if we are targeting the accelerator
if "arm_cpu" in target.keys:
data_pack = False
layout = "NCHW"
fcompute = topi.arm_cpu.conv2d_transpose_nchw
fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw
elif "vta" in target.keys:
data_pack = True
layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
fcompute = vta.top.conv2d_transpose_packed
fschedule = vta.top.schedule_conv2d_transpose_packed
# Derive shapes depending upon packing
a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
if data_pack:
data_shape = (
wl.batch // env.BATCH,
wl.in_filter // env.BLOCK_IN,
wl.height,
wl.width,
env.BATCH,
env.BLOCK_IN,
)
kernel_shape = (
wl.out_filter // env.BLOCK_OUT,
wl.in_filter // env.BLOCK_IN,
wl.hkernel,
wl.wkernel,
env.BLOCK_OUT,
env.BLOCK_IN,
)
else:
data_shape = a_shape
kernel_shape = w_shape
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
# Define base computation schedule
with target:
res = fcompute(
data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)
)
res = topi.right_shift(res, env.WGT_WIDTH)
res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
res = topi.cast(res, env.out_dtype)
# Derive base schedule
s = fschedule([res])
if print_ir:
print(vta.lower(s, [data, kernel, res], simple_mode=True))
# Derive number of ops
fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad
fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad
num_ops = (
2
* wl.batch
* fout_height
* fout_width
* wl.hkernel
* wl.wkernel
* wl.out_filter
* wl.in_filter
)
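    # Worked example (derived from the formulas above) for the DCGAN.CT1
    # workload (4x4 input, stride 2, pad 1, 4x4 kernel, no output padding):
    #   fout_height = fout_width = (4 - 1) * 2 - 2 * 1 + 4 + 0 = 8
    #   num_ops = 2 * batch * 8 * 8 * 4 * 4 * 512 * 1024
    #           = about 1.07e9 ops per batch element
    # (multiply-accumulates are counted as 2 ops, hence the leading factor of 2)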
# @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
def get_ref_data():
# derive min max for act and wgt types (max non inclusive)
a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
w_np = np.random.randint(
w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
).astype(kernel.dtype)
r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
a_np.astype(env.acc_dtype),
w_np.astype(env.acc_dtype),
(wl.hstride, wl.wstride),
wl.hpad,
(wl.o_hpad, wl.o_wpad),
).astype(env.acc_dtype)
return a_np, w_np, r_np
# Data in original format
data_np, kernel_np, res_ref = get_ref_data()
if data_pack:
data_np = data_np.reshape(
wl.batch // env.BATCH,
env.BATCH,
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.height,
wl.width,
).transpose((0, 2, 4, 5, 1, 3))
kernel_np = kernel_np.reshape(
wl.in_filter // env.BLOCK_IN,
env.BLOCK_IN,
wl.out_filter // env.BLOCK_OUT,
env.BLOCK_OUT,
wl.hkernel,
wl.wkernel,
).transpose((2, 0, 4, 5, 3, 1))
kernel_np = np.flip(kernel_np, 2)
kernel_np = np.flip(kernel_np, 3)
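        # Packing note: the reshapes/transposes above convert the data into
        # VTA's blocked NCHW%dn%dc layout (outer N/C tiles with inner
        # BATCH x BLOCK_IN lanes) and the kernel into an analogous blocked
        # OIHWoi layout. The two np.flip calls rotate the kernel 180 degrees
        # spatially, presumably because the packed transposed-conv kernel is
        # expected pre-rotated relative to the NumPy reference implementation.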
# Build
if "vta" in target.keys:
mod = vta.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
else:
mod = tvm.build(
s,
[data, kernel, res],
target=target,
target_host=env.target_host,
name="conv2d_transpose",
)
temp = utils.tempdir()
mod.save(temp.relpath("conv2d_transpose.o"))
remote.upload(temp.relpath("conv2d_transpose.o"))
f = remote.load_module("conv2d_transpose.o")
ctx = remote.context(str(target))
res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
data_arr = tvm.nd.array(data_np, ctx)
kernel_arr = tvm.nd.array(kernel_np, ctx)
res_arr = tvm.nd.array(res_np, ctx)
time_f = f.time_evaluator("conv2d_transpose", ctx, number=samples)
# In vta sim mode, collect simulator runtime statistics
stats = {}
cost = None
if env.TARGET in ["sim", "tsim"]:
# Check if we're in local RPC mode (allows us to rebuild the
# runtime on the fly when varying the VTA designs)
local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
if local_rpc:
if env.TARGET == "sim":
remote.get_function("vta.simulator.profiler_clear")()
else:
remote.get_function("vta.tsim.profiler_clear")()
cost = time_f(data_arr, kernel_arr, res_arr)
if env.TARGET == "sim":
stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
else:
stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
else:
simulator.clear_stats()
cost = time_f(data_arr, kernel_arr, res_arr)
stats = simulator.stats()
else:
cost = time_f(data_arr, kernel_arr, res_arr)
# Check correctness
correct = False
if check_correctness:
res_orig = res_arr.asnumpy()
if data_pack:
res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
wl.batch, wl.out_filter, fout_height, fout_width
)
res_ref = res_ref >> env.WGT_WIDTH
res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
res_ref = res_ref.astype(env.out_dtype)
correct = np.allclose(res_orig, res_ref)
gops = (num_ops / cost.mean) / float(10 ** 9)
status = "PASSED" if correct else "FAILED"
if "arm_cpu" in target.keys:
device = "CPU"
elif "vta" in target.keys:
device = "VTA"
print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d_transpose(device):
def _run(env, remote):
if device == "vta":
target = env.target
if env.TARGET not in ["sim", "tsim"]:
assert tvm.runtime.enabled("rpc")
program_fpga(remote, bitstream=None)
reconfig_runtime(remote)
elif device == "arm_cpu":
target = env.target_vta_cpu
with autotvm.tophub.context(target): # load pre-tuned schedule parameters
for _, wl in dcgan_wklds:
print(wl)
run_conv2d_transpose(env, remote, wl, target)
vta.testing.run(_run)
if __name__ == "__main__":
test_conv2d_transpose(device="arm_cpu")
test_conv2d_transpose(device="vta")
|
[] |
[] |
[
"VTA_LOCAL_SIM_RPC"
] |
[]
|
["VTA_LOCAL_SIM_RPC"]
|
python
| 1 | 0 | |
engine/docker/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go
|
package main
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"time"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/integration-cli/cli/build/fakecontext"
"github.com/docker/docker/integration-cli/cli/build/fakegit"
"github.com/docker/docker/integration-cli/cli/build/fakestorage"
"github.com/docker/docker/internal/testutil"
"github.com/docker/docker/pkg/archive"
"github.com/go-check/check"
"github.com/gotestyourself/gotestyourself/icmd"
digest "github.com/opencontainers/go-digest"
)
func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(`
FROM busybox
RUN []
`))
}
func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
name := "testbuildshcmdjsonentrypoint"
expected := "/bin/sh -c echo test"
if testEnv.DaemonPlatform() == "windows" {
expected = "cmd /S /C echo test"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENTRYPOINT ["echo"]
CMD echo test
`))
out, _ := dockerCmd(c, "run", "--rm", name)
if strings.TrimSpace(out) != expected {
c.Fatalf("CMD did not contain %q : %q", expected, out)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
// Windows does not support FROM scratch or the USER command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV user foo
USER ${user}
`))
res := inspectFieldJSON(c, name, "Config.User")
if res != `"foo"` {
c.Fatal("User foo from environment not in Config.User on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
name := "testbuildenvironmentreplacement"
var volumePath string
if testEnv.DaemonPlatform() == "windows" {
volumePath = "c:/quux"
} else {
volumePath = "/quux"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
ENV volume `+volumePath+`
VOLUME ${volume}
`))
var volumes map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes)
if _, ok := volumes[volumePath]; !ok {
c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
// Windows does not support FROM scratch or the EXPOSE command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV port 80
EXPOSE ${port}
ENV ports " 99 100 "
EXPOSE ${ports}
`))
var exposedPorts map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts)
exp := []int{80, 99, 100}
for _, p := range exp {
tmp := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[tmp]; !ok {
c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV MYWORKDIR /work
RUN mkdir ${MYWORKDIR}
WORKDIR ${MYWORKDIR}
`))
res := inspectFieldJSON(c, name, "Config.WorkingDir")
expected := `"/work"`
if testEnv.DaemonPlatform() == "windows" {
expected = `"C:\\work"`
}
if res != expected {
c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM `+minimalBaseImage()+`
ENV baz foo
ENV quux bar
ENV dot .
ENV fee fff
ENV gee ggg
ADD ${baz} ${dot}
COPY ${quux} ${dot}
ADD ${zzz:-${fee}} ${dot}
COPY ${zzz:-${gee}} ${dot}
`),
build.WithFile("foo", "test1"),
build.WithFile("bar", "test2"),
build.WithFile("fff", "test3"),
build.WithFile("ggg", "test4"),
))
}
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
ENV foo2="abc\def"
RUN [ "$foo2" = 'abc\def' ]
ENV foo3="abc\\def"
RUN [ "$foo3" = 'abc\def' ]
ENV foo4='abc\\def'
RUN [ "$foo4" = 'abc\\def' ]
ENV foo5='abc\def'
RUN [ "$foo5" = 'abc\def' ]
`))
envResult := []string{}
inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult)
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "zzz" {
c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "zzz" {
c.Fatalf("%s should be 'zzz' but instead its %q", parts[0], parts[1])
}
} else if strings.HasPrefix(parts[0], "env") {
envCount++
if parts[1] != "foo" {
c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
}
}
}
if !found {
c.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
}
func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) {
// The volume paths used in this test are invalid on Windows
testRequires(c, DaemonIsLinux)
name := "testbuildhandleescapes"
testCases := []struct {
volumeValue string
expected string
}{
{
volumeValue: "${FOO}",
expected: "bar",
},
{
volumeValue: `\${FOO}`,
expected: "${FOO}",
},
// this test in particular provides *7* backslashes and expects 6 to come back.
// Like above, the first escape is swallowed and the rest are treated as
// literals, this one is just less obvious because of all the character noise.
{
volumeValue: `\\\\\\\${FOO}`,
			expected:    `\\\\\\${FOO}`,
},
}
for _, tc := range testCases {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`
FROM scratch
ENV FOO bar
VOLUME %s
`, tc.volumeValue)))
var result map[string]map[string]struct{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
if _, ok := result[tc.expected]; !ok {
c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result)
}
// Remove the image for the next iteration
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
name := "testbuildonbuildlowercase"
name2 := "testbuildonbuildlowercase2"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
onbuild run echo quux
`))
result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(`
FROM %s
`, name)))
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), "quux") {
c.Fatalf("Did not receive the expected echo text, got %s", result.Combined())
}
if strings.Contains(result.Combined(), "ONBUILD ONBUILD") {
c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvescapes"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo \$
`))
out, _ := dockerCmd(c, "run", "-t", name)
if strings.TrimSpace(out) != "$" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvoverwrite"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo ${TEST}
`))
out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)
if strings.TrimSpace(out) != "bar" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
// FIXME(vdemeester) why we disabled cache here ?
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
cli.BuildCmd(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`))
cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1)))
result := cli.DockerCmd(c, "run", name2)
result.Assert(c, icmd.Expected{Out: "hello world"})
}
// FIXME(vdemeester) why we disabled cache here ?
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
buildImageSuccessfully(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1)))
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatal("got malformed output from onbuild", out)
}
}
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildtwoimageswithadd"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
"index.html": "world",
}))
defer server.Close()
cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/robots.txt /`, server.URL())))
result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/index.html /`, server.URL())))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
// Temporary fix for #30890. TODO @jhowardmsft figure out what
// has changed in the master busybox image.
testRequires(c, DaemonIsLinux)
name := "testbuildlastmodified"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"file": "hello",
}))
defer server.Close()
var out, out2 string
dFmt := `FROM busybox
ADD %s/file /`
dockerfile := fmt.Sprintf(dFmt, server.URL())
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
// Build it again and make sure the mtime of the file didn't change.
// Wait a few seconds to make sure the time changed enough to notice
time.Sleep(2 * time.Second)
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
if out != out2 {
c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2)
}
// Now 'touch' the file and make sure the timestamp DID change this time
// Create a new fakeStorage instead of just using Add() to help windows
server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"file": "hello",
}))
defer server.Close()
dockerfile = fmt.Sprintf(dFmt, server.URL())
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
if out == out2 {
c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2)
}
}
// Regression for https://github.com/docker/docker/pull/27805
// Makes sure that we don't use the cache if the contents of
// a file in a subfolder of the context is modified and we re-build.
func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) {
name := "testbuildmodifyfileinfolder"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN ["mkdir", "/test"]
ADD folder/file /test/changetarget`))
defer ctx.Close()
if err := ctx.Add("folder/file", "first"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("folder/file", "second"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("cache was used even though file contents in folder was changed")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs
func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
name := "testaddsinglefiletoworkdir"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(
`FROM busybox
ADD test_file .`),
fakecontext.WithFiles(map[string]string{
"test_file": "test1",
}))
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
}))
defer server.Close()
cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file1 test_file2 /exists/
ADD test_file3 test_file4 %s/robots.txt /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
`, server.URL())),
build.WithFile("test_file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test_file3", "test3"),
build.WithFile("test_file3", "test3"),
build.WithFile("test_file4", "test4")))
}
// These tests are mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) {
testRequires(c, DaemonIsLinux)
testCases := []string{
"ADD . /new_dir",
"COPY test_dir /new_dir",
"WORKDIR /new_dir",
}
name := "testbuildusernamespacevalidateremappedroot"
for _, tc := range testCases {
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
%s
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)),
build.WithFile("test_dir/test_file", "test file")))
cli.DockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testaddfilewithwhitespace"
for _, command := range []string{"ADD", "COPY"} {
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
%s [ "test file1", "/test_file1" ]
%s [ "test_file2", "/test file2" ]
%s [ "test file3", "/test file3" ]
%s [ "test dir/test_file4", "/test_dir/test_file4" ]
%s [ "test_dir/test_file5", "/test dir/test_file5" ]
%s [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)),
build.WithFile("test file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test file3", "test3"),
build.WithFile("test dir/test_file4", "test4"),
build.WithFile("test_dir/test_file5", "test5"),
build.WithFile("test dir/test_file6", "test6"),
))
cli.DockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `FROM ` + testEnv.MinimalBaseImage() + `
RUN mkdir "C:/test dir"
RUN mkdir "C:/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN find "test1" "C:/test_file1"
RUN find "test2" "C:/test file2"
RUN find "test3" "C:/test file3"
RUN find "test4" "C:/test_dir/test_file4"
RUN find "test5" "C:/test dir/test_file5"
RUN find "test6" "C:/test dir/test_file6"`
name := "testcopyfilewithwhitespace"
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("test file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test file3", "test3"),
build.WithFile("test dir/test_file4", "test4"),
build.WithFile("test_dir/test_file5", "test5"),
build.WithFile("test dir/test_file6", "test6"),
))
}
func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
name := "testcopywildcard"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
"index.html": "world",
}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox
COPY file*.txt /tmp/
RUN ls /tmp/file1.txt /tmp/file2.txt
RUN [ "mkdir", "/tmp1" ]
COPY dir* /tmp1/
RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file
RUN [ "mkdir", "/tmp2" ]
ADD dir/*dir %s/robots.txt /tmp2/
RUN ls /tmp2/nest_nest_file /tmp2/robots.txt
`, server.URL())),
fakecontext.WithFiles(map[string]string{
"file1.txt": "test1",
"file2.txt": "test2",
"dir/nested_file": "nested file",
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
// Run this only on Linux
// Below is the original comment (that I don't agree with — vdemeester)
// Normally we would do c.Fatal(err) here but given that
// the odds of this failing are so rare, it must be because
// the OS we're running the client on doesn't support * in
// filenames (like windows). So, instead of failing the test
// just let it pass. Then we don't need to explicitly
// say which OSs this works on or not.
testRequires(c, DaemonIsLinux, UnixCli)
buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
COPY *.txt /tmp/
RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
`),
build.WithFile("*.txt", "hi there"),
))
}
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
COPY file1.txt /tmp/`),
fakecontext.WithFiles(map[string]string{
"file1.txt": "test1",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time even with wild cards.
// Use the same context so the file is the same and the checksum will match
ctx.Add("Dockerfile", `FROM busybox
COPY file*.txt /tmp/`)
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_dir/test_file", "test1")))
}
// Testing #5941 : Having an etc directory in context conflicts with the /etc/mtab
func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) {
buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD . /`),
build.WithFile("etc/test_file", "test1")))
buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
COPY . /`),
build.WithFile("etc/test_file", "test1")))
}
// Testing #9401 : Losing setuid flag after a ADD
func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
ADD suidbin /usr/bin/suidbin
RUN chmod 4755 /usr/bin/suidbin
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]
ADD ./data/ /
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`),
build.WithFile("suidbin", "suidbin"),
build.WithFile("/data/usr/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs - adapted for COPY
func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
name := "testcopysinglefiletoworkdir"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
COPY test_file .`),
fakecontext.WithFiles(map[string]string{
"test_file": "test1",
}))
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific
buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently working on Windows
dockerfile := `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile := "foo.txt"
var (
name = "test-link-absolute"
)
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
var symlinkTarget string
if runtime.GOOS == "windows" {
var driveLetter string
if abs, err := filepath.Abs(tempDir); err != nil {
c.Fatal(err)
} else {
driveLetter = abs[:1]
}
tempDirWithoutDrive := tempDir[2:]
symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive)
} else {
symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
}
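	// Note: after normalization the symlink target resolves to tempDir on the
	// host. The tar written below contains only this symlink, and the
	// subsequent `ADD foo.txt /symlink/` must not follow it outside the build
	// root; the os.Stat check at the end of the test asserts exactly that.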
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
c.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
c.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows
{
name := "testbuildinaccessiblefiles"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}),
)
defer ctx.Close()
// This is used to ensure we detect inaccessible files early during build in the cli client
pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown file to root: %s", err)
}
if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "no permission to read from ") {
c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined())
}
if !strings.Contains(result.Combined(), "error checking context") {
c.Fatalf("output should've contained the string: error checking context")
}
}
{
name := "testbuildinaccessibledirectory"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}),
)
defer ctx.Close()
// This is used to ensure we detect inaccessible directories early during build in the cli client
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "can't stat") {
c.Fatalf("output should've contained the string: can't access %s", result.Combined())
}
if !strings.Contains(result.Combined(), "error checking context") {
c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined())
}
}
{
name := "testlinksok"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"))
defer ctx.Close()
target := "../../../../../../../../../../../../../../../../../../../azA"
if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
c.Fatal(err)
}
defer os.Remove(target)
// This is used to ensure we don't follow links when checking if everything in the context is accessible
// This test doesn't require that we run commands as an unprivileged user
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
{
name := "testbuildignoredinaccessible"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{
"directoryWeCantStat/bar": "foo",
".dockerignore": "directoryWeCantStat",
}),
)
defer ctx.Close()
// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Dir: ctx.Dir,
Command: []string{"su", "unprivilegeduser", "-c",
fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
})
result.Assert(c, icmd.Expected{})
}
}
func (s *DockerSuite) TestBuildForceRm(c *check.C) {
containerCountBefore := getContainerCount(c)
name := "testbuildforcerm"
r := buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN true
RUN thiswillfail`)))
if r.ExitCode != 1 && r.ExitCode != 127 { // different on Linux / Windows
c.Fatalf("Wrong exit code")
}
containerCountAfter := getContainerCount(c)
if containerCountBefore != containerCountAfter {
c.Fatalf("--force-rm shouldn't have left containers behind")
}
}
func (s *DockerSuite) TestBuildRm(c *check.C) {
name := "testbuildrm"
testCases := []struct {
buildflags []string
shouldLeftContainerBehind bool
}{
// Default case (i.e. --rm=true)
{
buildflags: []string{},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm"},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm=false"},
shouldLeftContainerBehind: true,
},
}
for _, tc := range testCases {
containerCountBefore := getContainerCount(c)
buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox
RUN echo hello world`))
containerCountAfter := getContainerCount(c)
if tc.shouldLeftContainerBehind {
if containerCountBefore == containerCountAfter {
c.Fatalf("flags %v should have left containers behind", tc.buildflags)
}
} else {
if containerCountBefore != containerCountAfter {
c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags)
}
}
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows
var (
result map[string]map[string]struct{}
name = "testbuildvolumes"
emptyMap = make(map[string]struct{})
expected = map[string]map[string]struct{}{
"/test1": emptyMap,
"/test2": emptyMap,
"/test3": emptyMap,
"/test4": emptyMap,
"/test5": emptyMap,
"/test6": emptyMap,
"[/test7": emptyMap,
"/test8]": emptyMap,
}
)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
VOLUME /test1
VOLUME /test2
VOLUME /test3 /test4
VOLUME ["/test5", "/test6"]
VOLUME [/test7 /test8]
`))
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
equal := reflect.DeepEqual(&result, &expected)
if !equal {
c.Fatalf("Volumes %s, expected %s", result, expected)
}
}
func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
name := "testbuildmaintainer"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio`))
expected := "dockerio"
res := inspectField(c, name, "Author")
if res != expected {
c.Fatalf("Maintainer %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
expected := "dockerio"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio
RUN [ $(whoami) = 'dockerio' ]`))
res := inspectField(c, name, "Config.User")
if res != expected {
c.Fatalf("User %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
name := "testbuildrelativeworkdir"
var (
expected1 string
expected2 string
expected3 string
expected4 string
expectedFinal string
)
if testEnv.DaemonPlatform() == "windows" {
expected1 = `C:/`
expected2 = `C:/test1`
expected3 = `C:/test2`
expected4 = `C:/test2/test3`
expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox
} else {
expected1 = `/`
expected2 = `/test1`
expected3 = `/test2`
expected4 = `/test2/test3`
expectedFinal = `/test2/test3`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c "[ "$PWD" = "`+expected1+`" ]"
WORKDIR test1
RUN sh -c "[ "$PWD" = "`+expected2+`" ]"
WORKDIR /test2
RUN sh -c "[ "$PWD" = "`+expected3+`" ]"
WORKDIR test3
RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expectedFinal {
c.Fatalf("Workdir %s, expected %s", res, expectedFinal)
}
}
// #22181 Regression test. Single end-to-end test of using
// Windows semantics. Most path handling verifications are in unit tests
func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox
WORKDIR C:\\foo
WORKDIR bar
RUN sh -c "[ "$PWD" = "C:/foo/bar" ]"
`))
}
// #22181 Regression test. Most paths handling verifications are in unit test.
// One functional test for end-to-end
func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
// TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to
// support backslash such as .\\ being equivalent to ./ and c:\\ being
// equivalent to c:/. This is not currently (nor ever has been) supported
// by docker on the Windows platform.
buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
# No trailing slash on COPY/ADD
# Results in dir being changed to a file
WORKDIR /wc1
COPY wc1 c:/wc1
WORKDIR /wc2
ADD wc2 c:/wc2
WORKDIR c:/
RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]"
RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]"
# Trailing slash on COPY/ADD, Windows-style path.
WORKDIR /wd1
COPY wd1 c:/wd1/
WORKDIR /wd2
ADD wd2 c:/wd2/
RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]"
RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]"
`),
build.WithFile("wc1", "hellowc1"),
build.WithFile("wc2", "worldwc2"),
build.WithFile("wd1", "hellowd1"),
build.WithFile("wd2", "worldwd2"),
))
}
func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
name := "testbuildworkdirwithenvvariables"
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:\test1\test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV DIRPATH /test1
ENV SUBDIRNAME test2
WORKDIR $DIRPATH
WORKDIR $SUBDIRNAME/$MISSING_VAR`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expected {
c.Fatalf("Workdir %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
// cat /test1/test2/foo gets permission denied for the user
testRequires(c, NotUserNamespace)
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:/test1/test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, "testbuildrelativecopy", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
WORKDIR /test1
WORKDIR test2
RUN sh -c "[ "$PWD" = '`+expected+`' ]"
COPY foo ./
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
ADD foo ./bar/baz
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
COPY foo ./bar/baz2
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
WORKDIR ..
COPY foo ./
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
COPY foo /test3/
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
WORKDIR /test4
COPY . .
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
WORKDIR /test5/test6
COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`),
build.WithFile("foo", "hello"),
))
}
func (s *DockerSuite) TestBuildBlankName(c *check.C) {
name := "testbuildblankname"
testCases := []struct {
expression string
expectedStderr string
}{
{
expression: "ENV =",
expectedStderr: "ENV names can not be blank",
},
{
expression: "LABEL =",
expectedStderr: "LABEL names can not be blank",
},
{
expression: "ARG =foo",
expectedStderr: "ARG names can not be blank",
},
}
for _, tc := range testCases {
buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox
%s`, tc.expression))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: tc.expectedStderr,
})
}
}
func (s *DockerSuite) TestBuildEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
name := "testbuildenv"
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV PATH /test:$PATH
ENV PORT 2375
RUN [ $(env | grep PORT) = 'PORT=2375' ]`))
res := inspectField(c, name, "Config.Env")
if res != expected {
c.Fatalf("Env %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildPATH(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
fn := func(dockerfile string, expected string) {
buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile))
res := inspectField(c, "testbldpath", "Config.Env")
if res != expected {
c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile)
}
}
tests := []struct{ dockerfile, exp string }{
{"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM scratch\nENV PATH=/test", "[PATH=/test]"},
{"FROM busybox\nENV PATH=/test", "[PATH=/test]"},
{"FROM scratch\nENV PATH=''", "[PATH=]"},
{"FROM busybox\nENV PATH=''", "[PATH=]"},
}
for _, test := range tests {
fn(test.dockerfile, test.exp)
}
}
func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
RUN /non/existing/command`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
// compareDirectoryEntries compares two sets of FileInfo (usually taken from a directory)
// and returns an error if different.
func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
var (
e1Entries = make(map[string]struct{})
e2Entries = make(map[string]struct{})
)
for _, e := range e1 {
e1Entries[e.Name()] = struct{}{}
}
for _, e := range e2 {
e2Entries[e.Name()] = struct{}{}
}
if !reflect.DeepEqual(e1Entries, e2Entries) {
return fmt.Errorf("entries differ")
}
return nil
}
func (s *DockerSuite) TestBuildCmd(c *check.C) {
name := "testbuildcmd"
expected := "[/bin/echo Hello World]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["/bin/echo", "Hello World"]`))
res := inspectField(c, name, "Config.Cmd")
if res != expected {
c.Fatalf("Cmd %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExpose(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexpose"
expected := "map[2375/tcp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
// start building docker file with a large number of ports
portList := make([]string, 50)
line := make([]string, 100)
expectedPorts := make([]int, len(portList)*len(line))
for i := 0; i < len(portList); i++ {
for j := 0; j < len(line); j++ {
p := i*len(line) + j + 1
line[j] = strconv.Itoa(p)
expectedPorts[p-1] = p
}
if i == len(portList)-1 {
portList[i] = strings.Join(line, " ")
} else {
portList[i] = strings.Join(line, " ") + ` \`
}
}
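	// The loop above produced 50 continuation lines of 100 ports each (5000
	// ports total); every line except the last ends in a backslash, so the
	// template below renders them as a single multi-line EXPOSE instruction.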
dockerfile := `FROM scratch
EXPOSE {{range .}} {{.}}
{{end}}`
tmpl := template.Must(template.New("dockerfile").Parse(dockerfile))
buf := bytes.NewBuffer(nil)
tmpl.Execute(buf, portList)
name := "testbuildexpose"
buildImageSuccessfully(c, name, build.WithDockerfile(buf.String()))
// check if all the ports are saved inside Config.ExposedPorts
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
for _, p := range expectedPorts {
ep := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[ep]; !ok {
c.Errorf("Port(%s) is not exposed", ep)
} else {
delete(exposedPorts, ep)
}
}
if len(exposedPorts) != 0 {
c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
}
}
func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
buildID := func(name, exposed string) string {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
EXPOSE %s`, exposed)))
id := inspectField(c, name, "Id")
return id
}
id1 := buildID("testbuildexpose1", "80 2375")
id2 := buildID("testbuildexpose2", "2375 80")
if id1 != id2 {
c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
}
}
func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexposeuppercaseproto"
expected := "map[5678/udp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 5678/UDP`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
name := "testbuildentrypointinheritance"
name2 := "testbuildentrypointinheritance2"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
expected := "[/bin/echo]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT []`, name)))
res = inspectField(c, name2, "Config.Entrypoint")
expected = "[]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT []`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[/bin/echo]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) {
buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(`
FROM busybox
RUN echo "GRANDPARENT"
ONBUILD RUN echo "ONBUILD PARENT"
`))
// ONBUILD should be run in second build.
buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{
Out: "ONBUILD PARENT",
})
// ONBUILD should *not* be run in third build.
result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2"))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
}
}
func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithcache"
dockerfile := `FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
// Make sure that ADD/COPY still populate the cache even if they don't use it
func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
name := "testbuildconditionalcache"
dockerfile := `
FROM busybox
ADD foo /tmp/`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("foo", "bye"); err != nil {
c.Fatalf("Error modifying foo: %s", err)
}
// Updating a file should invalidate the cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id2 == id1 {
c.Fatal("Should not have used the cache")
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id3 != id2 {
c.Fatal("Should have used the cache")
}
}
func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
baseName := name + "-base"
cli.BuildCmd(c, baseName, build.WithDockerfile(`
FROM busybox
ENTRYPOINT ["/bin/sh"]
`))
dockerfile := `
FROM testbuildaddmultiplelocalfilewithcache-base
MAINTAINER dockerio
ADD foo Dockerfile /usr/lib/bla/
RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
result2 := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout())
}
if id1 == id3 {
c.Fatalf("The cache should have been invalided but hasn't: %s", result3.Stdout())
}
}
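// TestBuildCopyDirButNotFile checks that COPYing a directory keeps the build
// cache valid when an unrelated file with a similar name is later added to
// the context.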
func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
name := "testbuildcopydirbutnotfile"
name2 := "testbuildcopydirbutnotfile2"
dockerfile := `
FROM ` + minimalBaseImage() + `
COPY dir /tmp/`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"dir/foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
	// Check that adding a file with a similar name doesn't mess with the cache
if err := ctx.Add("dir_file", "hello2"); err != nil {
c.Fatal(err)
}
cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't")
}
}
func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
name4 := name + "4"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
	// Check that adding a file invalidates the cache of "ADD ."
if err := ctx.Add("bar", "hello2"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
	// Check that changing a file invalidates the cache of "ADD ."
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name3)
if id2 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
	// Check that rewriting a file with the same content but a different mtime
	// does not invalidate the cache of "ADD ."
time.Sleep(1 * time.Second) // wait second because of mtime precision
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx))
id4 := getIDByName(c, name4)
if id3 != id4 {
c.Fatal("The cache should have been used but hasn't.")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache)
func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddremotefilewithcache"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"baz": "hello",
}))
defer server.Close()
dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL())
cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
files := map[string]string{"baz": "hello"}
server := fakestorage.New(c, "", fakecontext.WithFiles(files))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL())))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't - #1")
}
	// Now create a different server with the same contents (which causes a different mtime)
// The cache should still be used
// allow some time for clock to pass as mtime precision is only 1s
time.Sleep(2 * time.Second)
server2 := fakestorage.New(c, "", fakecontext.WithFiles(files))
defer server2.Close()
ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server2.URL())))
defer ctx2.Close()
cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2))
id3 := getIDByName(c, name3)
if id1 != id3 {
c.Fatal("The cache should have been used but wasn't")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (combined)
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithcache"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"baz": "hello",
}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL())),
fakecontext.WithFiles(map[string]string{
"foo": "hello world",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalidated but hasn't.")
}
}
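// testContextTar builds an image from a build context that is streamed to the
// daemon over stdin as a tar archive with the given compression.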
func testContextTar(c *check.C, compression archive.Compression) {
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}),
)
defer ctx.Close()
context, err := archive.Tar(ctx.Dir, compression)
if err != nil {
c.Fatalf("failed to build context tar: %v", err)
}
name := "contexttar"
cli.BuildCmd(c, name, build.WithStdinContext(context))
}
func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
testContextTar(c, archive.Gzip)
}
func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
testContextTar(c, archive.Uncompressed)
}
func (s *DockerSuite) TestBuildNoContext(c *check.C) {
name := "nocontext"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-"},
Stdin: strings.NewReader(
`FROM busybox
CMD ["echo", "ok"]`),
}).Assert(c, icmd.Success)
if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
}
}
func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) {
name := "stdindockerfile"
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("bar"), 0600)
c.Assert(err, check.IsNil)
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir},
Stdin: strings.NewReader(
`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`),
}).Assert(c, icmd.Success)
res := inspectField(c, name, "Config.Cmd")
c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`)
}
func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) {
name := "stdindockerfiletarcontext"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", "-"},
}).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "use stdin for both build context and dockerfile",
})
}
func (s *DockerSuite) TestBuildDockerfileStdinNoExtraFiles(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, false, false)
}
func (s *DockerSuite) TestBuildDockerfileStdinDockerignore(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, true, false)
}
func (s *DockerSuite) TestBuildDockerfileStdinDockerignoreIgnored(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, true, true)
}
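// testBuildDockerfileStdinNoExtraFiles builds with the Dockerfile passed on
// stdin and checks that only the expected context files end up in the image,
// optionally exercising a .dockerignore that excludes the on-disk Dockerfile
// (and, when ignoreDockerignore is set, the .dockerignore file itself).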
func (s *DockerSuite) testBuildDockerfileStdinNoExtraFiles(c *check.C, hasDockerignore, ignoreDockerignore bool) {
name := "stdindockerfilenoextra"
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
writeFile := func(filename, content string) {
err = ioutil.WriteFile(filepath.Join(tmpDir, filename), []byte(content), 0600)
c.Assert(err, check.IsNil)
}
writeFile("foo", "bar")
if hasDockerignore {
// Add an empty Dockerfile to verify that it is not added to the image
writeFile("Dockerfile", "")
ignores := "Dockerfile\n"
if ignoreDockerignore {
ignores += ".dockerignore\n"
}
writeFile(".dockerignore", ignores)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir},
Stdin: strings.NewReader(
`FROM busybox
COPY . /baz`),
})
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "run", "--rm", name, "ls", "-A", "/baz")
if hasDockerignore && !ignoreDockerignore {
c.Assert(result.Stdout(), checker.Equals, ".dockerignore\nfoo\n")
} else {
c.Assert(result.Stdout(), checker.Equals, "foo\n")
}
}
func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildimg"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest
RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
VOLUME /test`))
out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
if expected := "drw-------"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
if expected := "daemon daemon"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
}
// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
name := "testbuildcmdcleanup"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo "hello"`))
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo "hello"
ADD foo /foo
ENTRYPOINT ["/bin/echo"]`),
build.WithFile("foo", "hello")))
res := inspectField(c, name, "Config.Cmd")
// Cmd must be cleaned up
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
}
func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
expected := "foo: no such file or directory"
if testEnv.DaemonPlatform() == "windows" {
expected = "foo: The system cannot find the file specified"
}
buildImage(name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD foo /usr/local/bar`),
build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: expected,
})
}
func (s *DockerSuite) TestBuildInheritance(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildinheritance"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
ports1 := inspectField(c, name, "Config.ExposedPorts")
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["/bin/echo"]`, name)))
res := inspectField(c, name, "Config.Entrypoint")
if expected := "[/bin/echo]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
ports2 := inspectField(c, name, "Config.ExposedPorts")
if ports1 != ports2 {
c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
}
}
func (s *DockerSuite) TestBuildFails(c *check.C) {
name := "testbuildfails"
buildImage(name, build.WithDockerfile(`FROM busybox
RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{
ExitCode: 23,
Err: "returned a non-zero code: 23",
})
}
func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
name := "testbuildonbuild"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ONBUILD RUN touch foobar`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
RUN [ -f foobar ]`, name)))
}
// gh #2446
func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
makeLink := `ln -s /foo /bar`
if testEnv.DaemonPlatform() == "windows" {
makeLink = `mklink /D C:\bar C:\foo`
}
name := "testbuildaddtosymlinkdest"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
RUN sh -c "mkdir /foo"
RUN `+makeLink+`
ADD foo /bar/
RUN sh -c "[ -f /bar/foo ]"
RUN sh -c "[ -f /foo/foo ]"`),
build.WithFile("foo", "hello"),
))
}
func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
name := "testbuildescapewhitespace"
buildImageSuccessfully(c, name, build.WithDockerfile(`
# ESCAPE=\
FROM busybox
MAINTAINER "Docker \
IO <io@\
docker.com>"
`))
res := inspectField(c, name, "Author")
if res != "\"Docker IO <[email protected]>\"" {
c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
}
}
func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
// Verify that strings that look like ints are still passed as strings
name := "testbuildstringing"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
MAINTAINER 123`))
out, _ := dockerCmd(c, "inspect", name)
if !strings.Contains(out, "\"123\"") {
c.Fatalf("Output does not contain the int as a string:\n%s", out)
}
}
func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
name := "testbuilddockerignore"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ ! -e /bla/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ ! -e v.cc ]]"
RUN sh -c "[[ ! -e src/v.cc ]]"
RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".git/HEAD", "ref: foo"),
build.WithFile("src/x.go", "package main"),
build.WithFile("src/_vendor/v.go", "package main"),
build.WithFile("src/_vendor/v.cc", "package main"),
build.WithFile("src/v.cc", "package main"),
build.WithFile("v.cc", "package main"),
build.WithFile("dir/foo", ""),
build.WithFile(".gitignore", ""),
build.WithFile("README.md", "readme"),
build.WithFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
**/*.cc
dir`),
))
}
func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
name := "testbuilddockerignorecleanpaths"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /tmp/
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`),
build.WithFile("foo", "foo"),
build.WithFile("foo2", "foo2"),
build.WithFile("dir1/foo", "foo in dir1"),
build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"),
))
}
func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
name := "testbuilddockerignoreexceptions"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ -e /bla/dir/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/dir/foo1 ]]"
RUN sh -c "[[ -f /bla/dir/e ]]"
RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ -e /bla/dir/a.cc ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".git/HEAD", "ref: foo"),
build.WithFile("src/x.go", "package main"),
build.WithFile("src/_vendor/v.go", "package main"),
build.WithFile("dir/foo", ""),
build.WithFile("dir/foo1", ""),
build.WithFile("dir/dir/f1", ""),
build.WithFile("dir/dir/foo", ""),
build.WithFile("dir/e", ""),
build.WithFile("dir/e-dir/foo", ""),
build.WithFile(".gitignore", ""),
build.WithFile("README.md", "readme"),
build.WithFile("dir/a.cc", "hello"),
build.WithFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
dir
!dir/e*
!dir/dir/foo
**/*.cc
!**/*.cc`),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "Dockerfile\n"),
))
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "./Dockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls /tmp/Dockerfile
RUN sh -c "! ls /tmp/MyDockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c,
build.WithFile("Dockerfile", "Should not use me"),
build.WithFile("MyDockerfile", dockerfile),
build.WithFile(".dockerignore", "MyDockerfile\n"),
))
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c,
build.WithFile("Dockerfile", "Should not use me"),
build.WithFile("MyDockerfile", dockerfile),
build.WithFile(".dockerignore", "./MyDockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
name := "testbuilddockerignoredockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/.dockerignore"
RUN ls /tmp/Dockerfile`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", ".dockerignore\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
name := "testbuilddockerignoretouchdockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
".dockerignore": "Dockerfile\n",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 1")
}
	// Now make sure that modifying the (dockerignored) Dockerfile doesn't invalidate the cache
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 2")
}
// One more time but just 'touch' it instead of changing the content
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 3")
}
}
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ ! -e /Makefile ]]"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "*\n"),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", ".*"),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
name := "testbuilddockerignorebadexclusion"
buildImage(name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
build.WithFile(".dockerignore", "!\n"),
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "error checking context: 'illegal exclusion pattern: \"!\"",
})
}
func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.dockerignore ]]"
RUN sh -c "[[ ! -e /Dockerfile ]]"
RUN sh -c "[[ ! -e /file1 ]]"
RUN sh -c "[[ ! -e /dir ]]"`
// All of these should result in ignoring all files
for _, variant := range []string{"**", "**/", "**/**", "*"} {
buildImageSuccessfully(c, "noname", build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("file1", ""),
build.WithFile("dir/file1", ""),
build.WithFile(".dockerignore", variant),
))
dockerCmd(c, "rmi", "noname")
}
}
func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
#RUN sh -c "[[ -e /.dockerignore ]]"
RUN sh -c "[[ -e /Dockerfile ]] && \
[[ ! -e /file0 ]] && \
[[ ! -e /dir1/file0 ]] && \
[[ ! -e /dir2/file0 ]] && \
[[ ! -e /file1 ]] && \
[[ ! -e /dir1/file1 ]] && \
[[ ! -e /dir1/dir2/file1 ]] && \
[[ ! -e /dir1/file2 ]] && \
[[ -e /dir1/dir2/file2 ]] && \
[[ ! -e /dir1/dir2/file4 ]] && \
[[ ! -e /dir1/dir2/file5 ]] && \
[[ ! -e /dir1/dir2/file6 ]] && \
[[ ! -e /dir1/dir3/file7 ]] && \
[[ ! -e /dir1/dir3/file8 ]] && \
[[ -e /dir1/dir3 ]] && \
[[ -e /dir1/dir4 ]] && \
[[ ! -e 'dir1/dir5/fileAA' ]] && \
[[ -e 'dir1/dir5/fileAB' ]] && \
[[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing
RUN echo all done!`
dockerignore := `
**/file0
**/*file1
**/dir1/file2
dir1/**/file4
**/dir2/file5
**/dir1/dir2/file6
dir1/dir3/**
**/dir4/**
**/file?A
**/file\?B
**/dir5/file.
`
buildImageSuccessfully(c, "noname", build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", dockerignore),
build.WithFile("dir1/file0", ""),
build.WithFile("dir1/dir2/file0", ""),
build.WithFile("file1", ""),
build.WithFile("dir1/file1", ""),
build.WithFile("dir1/dir2/file1", ""),
build.WithFile("dir1/file2", ""),
build.WithFile("dir1/dir2/file2", ""), // remains
build.WithFile("dir1/dir2/file4", ""),
build.WithFile("dir1/dir2/file5", ""),
build.WithFile("dir1/dir2/file6", ""),
build.WithFile("dir1/dir3/file7", ""),
build.WithFile("dir1/dir3/file8", ""),
build.WithFile("dir1/dir4/file9", ""),
build.WithFile("dir1/dir5/fileAA", ""),
build.WithFile("dir1/dir5/fileAB", ""),
build.WithFile("dir1/dir5/fileB", ""),
))
}
func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildlinebreak"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildeolinline"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass > /tmp/passwd'
RUN echo "foo \n bar"; echo "baz"
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildcomments"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
# comment with line break \
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]`))
}
func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildusers"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# Make sure our defaults work
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
USER root
RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
# Setup dockerio user and group
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \
echo 'dockerio:x:1001:' >> /etc/group
# Make sure we can switch to our user and all the information is exactly as we expect it to be
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
# Switch back to root and double check that worked exactly as we might expect it to
USER root
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \
# Add a "supplementary" group for our dockerio user
echo 'supplementary:x:1002:dockerio' >> /etc/group
# ... and then go verify that we get it like we expect
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
USER 1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
# super test the new "user:group" syntax
USER dockerio:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER dockerio:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
# make sure unknown uid/gid still works properly
USER 1042:1043
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2)
func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage"
dockerfile := `FROM busybox
ENV HOME /root
ENV PATH $HOME/bin:$PATH
ENV PATH /tmp:$PATH
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
ENV FOO /foo/baz
ENV BAR /bar
ENV BAZ $BAR
ENV FOOPATH $PATH:$FOO
RUN [ "$BAR" = "$BAZ" ]
RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
ENV FROM hello/docker/world
ENV TO /docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc=def
ENV ghi=$abc
RUN [ "$ghi" = "def" ]
`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("hello/docker/world", "hello"),
))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage)
func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage2"
dockerfile := `FROM busybox
ENV abc=def def="hello world"
RUN [ "$abc,$def" = "def,hello world" ]
ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too"
RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$abc,$(cat $TO)" = "zzz,hello" ]
ENV abc 'yyy'
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
# use grep to make sure if the builder substitutes \$foo by mistake
# we don't get a false positive
ENV abc=\$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc \$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc=\'foo\' abc2=\"foo\"
RUN [ "$abc,$abc2" = "'foo',\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = '"foo"' ]
ENV abc=ABC
RUN [ "$abc" = "ABC" ]
ENV def1=${abc:-DEF} def2=${ccc:-DEF}
ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:}
RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ]
ENV mypath=${mypath:+$mypath:}/home
ENV mypath=${mypath:+$mypath:}/away
RUN [ "$mypath" = '/home:/away' ]
ENV e1=bar
ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo" eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("hello/docker/world", "hello"),
))
}
func (s *DockerSuite) TestBuildAddScript(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddscript"
dockerfile := `
FROM busybox
ADD test /test
RUN ["chmod","+x","/test"]
RUN ["/test"]
RUN [ "$(cat /testfile)" = 'test!' ]`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("test", "#!/bin/sh\necho 'test!' > /testfile"),
))
}
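// TestBuildAddTar checks that ADDing a tar archive extracts it into both
// existing and not-yet-existing destination directories.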
func (s *DockerSuite) TestBuildAddTar(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddtar"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar /
RUN cat /test/foo | grep Hi
ADD test.tar /test.tar
RUN cat /test.tar/test/foo | grep Hi
ADD test.tar /unlikely-to-exist
RUN cat /unlikely-to-exist/test/foo | grep Hi
ADD test.tar /unlikely-to-exist-trailing-slash/
RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir
ADD test.tar /existing-directory
RUN cat /existing-directory/test/foo | grep Hi
ADD test.tar /existing-directory-trailing-slash/
RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
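// TestBuildAddBrokenTar checks that ADDing a truncated (corrupt) tar archive
// fails the build.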
func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) {
name := "testbuildaddbrokentar"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar /`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
// Corrupt the tar by removing one byte off the end
stat, err := testTar.Stat()
if err != nil {
c.Fatalf("failed to stat tar archive: %v", err)
}
if err := testTar.Truncate(stat.Size() - 1); err != nil {
c.Fatalf("failed to truncate tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildAddNonTar(c *check.C) {
name := "testbuildaddnontar"
// Should not try to extract test.tar
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD test.tar /
RUN test -f /test.tar`),
build.WithFile("test.tar", "not_a_tar_file"),
))
}
func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxz"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxzgz"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
icmd.RunCmd(icmd.Cmd{
Command: []string{"gzip", "test.tar.xz"},
Dir: tmpDir,
})
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildFromGit(c *check.C) {
name := "testbuildfromgit"
git := fakegit.New(c, "repo", map[string]string{
"Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) {
name := "testbuildfromgit"
git := fakegit.New(c, "repo", map[string]string{
"docker/Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"docker/first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL)))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitWithF(c *check.C) {
name := "testbuildfromgitwithf"
git := fakegit.New(c, "repo", map[string]string{
"myApp/myDockerfile": `FROM busybox
RUN echo hi from Dockerfile`,
}, true)
defer git.Close()
buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{
Out: "hi from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
name := "testbuildfromremotetarball"
buffer := new(bytes.Buffer)
tw := tar.NewWriter(buffer)
defer tw.Close()
dockerfile := []byte(`FROM busybox
MAINTAINER docker`)
if err := tw.WriteHeader(&tar.Header{
Name: "Dockerfile",
Size: int64(len(dockerfile)),
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write(dockerfile); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{
"testT.tar": buffer,
}))
defer server.Close()
cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar"))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["test"]
ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["cat"]`, name)))
res := inspectField(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
res = inspectField(c, name, "Config.Entrypoint")
if expected := "[cat]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
name := "testbuildclearcmd"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/bash"]
CMD []`))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected %s", res, "[]")
}
}
func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
// Skip on Windows. Base image on Windows has a CMD set in the image.
testRequires(c, DaemonIsLinux)
name := "testbuildemptycmd"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "null" {
c.Fatalf("Cmd %s, expected %s", res, "null")
}
}
func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
name := "testbuildonbuildparent"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n"))
buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
Out: "# Executing 1 build trigger",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
name := "abcd:" + testutil.GenerateRandomAlphaOnlyString(200)
buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
ExitCode: 125,
Err: "invalid reference format",
})
}
func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
name := "testbuildcmdshc"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["/bin/sh","-c","echo cmd"]`
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo cmd"]`
}
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
	// Test to make sure that when we concatenate the CMD array into a string
	// we keep the argument separator, so that ["echo","hi"] and ["echo hi"]
	// don't end up looking the same
name := "testbuildcmdspaces"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n"))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n"))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same CMD")
}
// Now do the same with ENTRYPOINT
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n"))
id1 = getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n"))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same ENTRYPOINT")
}
}
func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
name := "testbuildcmdjson"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["echo","cmd"]`
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChild(c *check.C) {
buildImageSuccessfully(c, "parent", build.WithDockerfile(`
FROM busybox
ENTRYPOINT exit 130
`))
icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{
ExitCode: 130,
})
buildImageSuccessfully(c, "child", build.WithDockerfile(`
FROM parent
ENTRYPOINT exit 5
`))
icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{
ExitCode: 5,
})
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChildInspect(c *check.C) {
var (
name = "testbuildepinherit"
name2 = "testbuildepinherit2"
expected = `["/bin/sh","-c","echo quux"]`
)
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo quux"]`
}
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar"))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name)))
res := inspectFieldJSON(c, name2, "Config.Entrypoint")
if res != expected {
c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
}
icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{
Out: "quux",
})
}
func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
name := "testbuildentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT echo`))
dockerCmd(c, "run", "--rm", name)
}
func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildexoticshellinterpolation"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV SOME_VAR a.b.c
RUN [ "$SOME_VAR" = 'a.b.c' ]
RUN [ "${SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR%.*}" = 'a.b' ]
RUN [ "${SOME_VAR%%.*}" = 'a' ]
RUN [ "${SOME_VAR#*.}" = 'b.c' ]
RUN [ "${SOME_VAR##*.}" = 'c' ]
RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
RUN [ "${#SOME_VAR}" = '5' ]
RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
`))
}
func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"
expectedExitCode := 2
if testEnv.DaemonPlatform() == "windows" {
expectedExitCode = 127
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
CMD [ '/bin/sh', '-c', 'echo hi' ]`))
icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{
ExitCode: expectedExitCode,
})
}
func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
name := "testbuildverboseout"
expected := "\n123\n"
if testEnv.DaemonPlatform() == "windows" {
expected = "\n123\r\n"
}
buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo 123`)).Assert(c, icmd.Expected{
Out: expected,
})
}
func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
name := "testbuildwithtabs"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo"))
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
if testEnv.DaemonPlatform() == "windows" {
expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]`
expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
}
if res != expected1 && res != expected2 {
c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
}
}
func (s *DockerSuite) TestBuildLabels(c *check.C) {
name := "testbuildlabel"
expected := `{"License":"GPL","Vendor":"Acme"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme
LABEL License GPL`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
name := "testbuildlabelcache"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme1`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor Acme`))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2)
}
// Now make sure the cache isn't used by mistake
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2)
}
}
func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) {
// This test makes sure that -q works correctly when build is successful:
// stdout has only the image ID (long image ID) and stderr is empty.
outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")
buildFlags := cli.WithFlags("-q")
tt := []struct {
Name string
BuildFunc func(string) *icmd.Result
}{
{
Name: "quiet_build_stdin_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox"))
},
},
{
Name: "quiet_build_ctx_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, build.WithBuildContext(c,
build.WithFile("Dockerfile", "FROM busybox"),
build.WithFile("quiet_build_success_fctx", "test"),
))
},
},
{
Name: "quiet_build_git_success",
BuildFunc: func(name string) *icmd.Result {
git := fakegit.New(c, "repo", map[string]string{
"Dockerfile": "FROM busybox",
}, true)
return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL))
},
},
}
for _, te := range tt {
result := te.BuildFunc(te.Name)
result.Assert(c, icmd.Success)
if outRegexp.Find([]byte(result.Stdout())) == nil {
c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout())
}
if result.Stderr() != "" {
c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr())
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
	// This test makes sure that -q works correctly when the build fails, by
	// comparing the stderr output in quiet mode with the combined stdout and
	// stderr output in verbose mode
testRequires(c, Network)
testName := "quiet_build_not_exists_image"
dockerfile := "FROM busybox11"
quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(testName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
	// This test makes sure that -q works correctly when the build fails, by
	// comparing the stderr output in quiet mode with the combined stdout and
	// stderr output in verbose mode
testCases := []struct {
testName string
dockerfile string
}{
{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
{"quiet_build_unknown_instr", "FROMD busybox"},
}
for _, tc := range testCases {
quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined()))
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
// This test ensures that when given a wrong URL, stderr in quiet mode and
// stderr in verbose mode are identical.
// TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout
URL := "http://something.invalid"
name := "quiet_build_wrong_remote"
quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(name, build.WithContextPath(URL))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if strings.TrimSpace(quietResult.Stderr()) != strings.TrimSpace(result.Combined()) {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildStderr(c *check.C) {
// This test just makes sure that no non-error output goes
// to stderr
name := "testbuildstderr"
result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one"))
result.Assert(c, icmd.Success)
// Windows to non-Windows should have a security warning
if runtime.GOOS == "windows" && testEnv.DaemonPlatform() != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") {
c.Fatalf("Stdout contains unexpected output: %q", result.Stdout())
}
// Stderr should always be empty
if result.Stderr() != "" {
c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr())
}
}
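// TestBuildChownSingleFile checks that a file COPY'd into the image is owned by
// root:root, even though it is chowned to a different uid/gid in the build context.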
func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // test uses chown: not available on windows
name := "testbuildchownsinglefile"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY test /
RUN ls -l /test
RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
`),
fakecontext.WithFiles(map[string]string{
"test": "test",
}))
defer ctx.Close()
if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
c.Fatal(err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
}
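// TestBuildSymlinkBreakout makes sure a symlink unpacked from a tar via ADD
// cannot be used to write files outside of the build destination: the 'inject'
// file must not end up next to the build context.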
func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
c.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
c.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
c.Fatal(err)
}
w := tar.NewWriter(f)
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx)))
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
c.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
c.Fatalf("unexpected error: %v", err)
}
}
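// TestBuildXZHost checks that the daemon handles ADD of an .xz file itself and
// never executes an xz binary shipped inside the image, so the planted
// /usr/local/sbin/xz script must not create /injected.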
func (s *DockerSuite) TestBuildXZHost(c *check.C) {
// /usr/local/sbin/xz gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildxzhost"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`),
build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"),
build.WithFile("xz", "#!/bin/sh\ntouch /injected"),
))
}
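// TestBuildVolumesRetainContents checks that a file copied into a directory that
// is later declared as a VOLUME is still present when running the image.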
func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
// /foo/file gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127
var (
name = "testbuildvolumescontent"
expected = "some text"
volName = "/foo"
)
if testEnv.DaemonPlatform() == "windows" {
volName = "C:/foo"
}
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
COPY content /foo/file
VOLUME `+volName+`
CMD cat /foo/file`),
build.WithFile("content", expected),
))
out, _ := dockerCmd(c, "run", "--rm", name)
if out != expected {
c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
}
}
// FIXME(vdemeester) part of this should be unit test, other part should be clearer
func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{
"Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
"files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
"files/dFile": "FROM busybox\nRUN echo from files/dFile",
"dFile": "FROM busybox\nRUN echo from dFile",
"files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
}))
defer ctx.Close()
cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from files/Dockerfile",
})
cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from files/dFile",
})
cli.Docker(cli.Args("build", "--file=dFile", "-t", "test4", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from dFile",
})
dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5")
c.Assert(err, check.IsNil)
nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
if _, err = os.Create(nonDockerfileFile); err != nil {
c.Fatal(err)
}
cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile),
})
cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from files/Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "must be within the build context",
})
tmpDir := os.TempDir()
cli.Docker(cli.Args("build", "-t", "test9", ctx.Dir), cli.InDir(tmpDir)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", "dFile2", "-t", "test10", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from files/dFile2",
})
}
func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
// If Dockerfile is not present, use dockerfile
buildImage("test1", build.WithBuildContext(c,
build.WithFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from dockerfile",
})
// Prefer Dockerfile in place of dockerfile
buildImage("test1", build.WithBuildContext(c,
build.WithFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
build.WithFile("Dockerfile", `FROM busybox
RUN echo from Dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox
RUN echo from baz
COPY * /tmp/
RUN find /tmp/`}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN echo from Dockerfile`))
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
return nil
})
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN echo "from Dockerfile"`))
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
cmd.Stdin = strings.NewReader(`FROM busybox
RUN echo "from baz"
COPY * /tmp/
RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`)
return nil
})
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
name := "testbuildfromofficial"
fromNames := []string{
"busybox",
"docker.io/busybox",
"index.docker.io/busybox",
"library/busybox",
"docker.io/library/busybox",
"index.docker.io/library/busybox",
}
for idx, fromName := range fromNames {
imgName := fmt.Sprintf("%s%d", name, idx)
buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName))
dockerCmd(c, "rmi", imgName)
}
}
func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
name := "testbuilddockerfileoutsidecontext"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
c.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
defer os.Chdir(wd)
if err := os.Chdir(ctx); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
c.Fatal(err)
}
for _, dockerfilePath := range []string{
filepath.Join("..", "outsideDockerfile"),
filepath.Join(ctx, "dockerfile1"),
filepath.Join(ctx, "dockerfile2"),
} {
result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")
result.Assert(c, icmd.Expected{
Err: "must be within the build context",
ExitCode: 1,
})
deleteImages(name)
}
os.Chdir(tmpdir)
// Path to Dockerfile should be resolved relative to working directory, not relative to context.
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)
if err == nil {
c.Fatalf("Expected error. Out: %s", out)
}
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildSpaces(c *check.C) {
// Test to make sure that leading/trailing spaces on a command
	// don't change the error msg we get
name := "testspaces"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n"))
defer ctx.Close()
result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
result1.Assert(c, icmd.Expected{
ExitCode: 1,
})
ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := removeLogTimestamps(result1.Error.Error())
e2 := removeLogTimestamps(result2.Error.Error())
	// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY")
result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
	// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY ")
result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
	// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error)
}
}
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
// Test to make sure that spaces in quotes aren't lost
name := "testspacesquotes"
dockerfile := `FROM busybox
RUN echo " \
foo "`
expected := "\n foo \n"
// Windows uses the builtin echo, which preserves quotes
if testEnv.DaemonPlatform() == "windows" {
expected = "\" foo \""
}
buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
Out: expected,
})
}
// #4393
func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(`
FROM busybox
RUN touch /foo
VOLUME /foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "file exists",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
// Test to make sure that all Dockerfile commands (except the ones listed
// in skipCmds) will generate an error if no args are provided.
// Note: INSERT is deprecated so we exclude it because of that.
skipCmds := map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
}
if testEnv.DaemonPlatform() == "windows" {
skipCmds = map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
"STOPSIGNAL": {},
"ARG": {},
"USER": {},
"EXPOSE": {},
}
}
for cmd := range command.Commands {
cmd = strings.ToUpper(cmd)
if _, ok := skipCmds[cmd]; ok {
continue
}
var dockerfile string
if cmd == "FROM" {
dockerfile = cmd
} else {
// Add FROM to make sure we don't complain about it missing
dockerfile = "FROM busybox\n" + cmd
}
buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: cmd + " requires",
})
}
}
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "No image was generated",
})
}
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
buildImageSuccessfully(c, "sc", build.WithBuildContext(c,
build.WithFile("Dockerfile", "FROM busybox\n"),
build.WithFile("..gitme", ""),
))
}
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
testRequires(c, DaemonIsLinux) // No hello-world Windows image
name := "testbuildrunonejson"
buildImage(name, build.WithDockerfile(`FROM hello-world:frozen
RUN [ "/hello" ]`)).Assert(c, icmd.Expected{
Out: "Hello from Docker",
})
}
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
name := "testbuildemptystringvolume"
buildImage(name, build.WithDockerfile(`
FROM busybox
ENV foo=""
VOLUME $foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
testRequires(c, SameHostDaemon, DaemonIsLinux)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := ParseCgroupPaths(string(data))
_, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
result := buildImage("buildcgroupparent",
cli.WithFlags("--cgroup-parent", cgroupParent),
build.WithDockerfile(`
FROM busybox
RUN cat /proc/self/cgroup
`))
result.Assert(c, icmd.Success)
m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined())
c.Assert(err, check.IsNil)
if !m {
c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined())
}
}
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
// Check to make sure our build output prints the Dockerfile cmd
	// properly - there was a bug that caused it to be duplicated on the
// Step X line
name := "testbuildnodupoutput"
result := buildImage(name, build.WithDockerfile(`
FROM busybox
RUN env`))
result.Assert(c, icmd.Success)
exp := "\nStep 2/2 : RUN env\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
// GH15826
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
// Explicit check to ensure that build starts from step 1 rather than 0
name := "testbuildstartsfromone"
result := buildImage(name, build.WithDockerfile(`FROM busybox`))
result.Assert(c, icmd.Success)
exp := "\nStep 1/1 : FROM busybox\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
// Test to make sure the bad command is quoted with just "s and
// not as a Go []string
name := "testbuildbadrunerrmsg"
shell := "/bin/sh -c"
exitCode := 127
if testEnv.DaemonPlatform() == "windows" {
shell = "cmd /S /C"
// architectural - Windows has to start the container to determine the exe is bad, Linux does not
exitCode = 1
}
exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode)
buildImage(name, build.WithDockerfile(`
FROM busybox
RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{
ExitCode: exitCode,
Err: exp,
})
}
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
repoName := s.setupTrustedImage(c, "trusted-build")
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuild"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]),
})
// We should also have a tag reference for the image.
dockerCmd(c, "inspect", repoName)
// We should now be able to remove the tag reference.
dockerCmd(c, "rmi", repoName)
}
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuilduntrustedtag"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "does not have trust data for",
})
}
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempDir)
// Make a real context directory in this temp directory with a simple
// Dockerfile.
realContextDirname := filepath.Join(tempDir, "context")
if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
c.Fatal(err)
}
if err = ioutil.WriteFile(
filepath.Join(realContextDirname, "Dockerfile"),
[]byte(`
FROM busybox
RUN echo hello world
`),
os.FileMode(0644),
); err != nil {
c.Fatal(err)
}
// Make a symlink to the real context directory.
contextSymlinkName := filepath.Join(tempDir, "context_link")
if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
c.Fatal(err)
}
// Executing the build with the symlink as the specified context should
// *not* fail.
dockerCmd(c, "build", contextSymlinkName)
}
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
c.Skip("Blacklisting for Docker CE")
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create the releases role
s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the releases role
otherTag := fmt.Sprintf("%s:other", repoName)
cli.DockerCmd(c, "tag", "busybox", otherTag)
cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/releases")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
cli.DockerCmd(c, "rmi", otherTag)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildreleasesrole"
cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName),
})
}
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
c.Skip("Blacklisting for Docker CE")
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create a non-releases delegation role
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the other role
otherTag := fmt.Sprintf("%s:other", repoName)
cli.DockerCmd(c, "tag", "busybox", otherTag)
cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/other")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
cli.DockerCmd(c, "rmi", otherTag)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildotherrole"
cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
// Issue #15634: COPY fails when path starts with "null"
func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) {
name := "testbuildnullstringinaddcopyvolume"
volName := "nullvolume"
if testEnv.DaemonPlatform() == "windows" {
volName = `C:\\nullvolume`
}
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD null /
COPY nullfile /
VOLUME `+volName+`
`),
build.WithFile("null", "test1"),
build.WithFile("nullfile", "test2"),
))
}
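// TestBuildStopSignal checks that STOPSIGNAL set in the Dockerfile ends up in the
// image config and is inherited by containers created from that image.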
func (s *DockerSuite) TestBuildStopSignal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet
imgName := "test_build_stop_signal"
buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox
STOPSIGNAL SIGKILL`))
res := inspectFieldJSON(c, imgName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
containerName := "test-container-stop-signal"
dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top")
res = inspectFieldJSON(c, containerName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
}
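// TestBuildBuildTimeArg checks that a value passed via --build-arg is visible to
// RUN instructions during the build but is not persisted into the runtime
// environment of the resulting image.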
func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
var dockerfile string
if testEnv.DaemonPlatform() == "windows" {
// Bugs in Windows busybox port - use the default base image and native cmd stuff
dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+`
ARG %s
RUN echo %%%s%%
CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey)
} else {
dockerfile = fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s
CMD echo $%s`, envKey, envKey, envKey)
}
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
containerName := "bldargCont"
out, _ := dockerCmd(c, "run", "--name", containerName, imgName)
out = strings.Trim(out, " \r\n'")
if out != "" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
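// TestBuildBuildTimeArgHistory checks that the default value of an ARG shows up
// in the `docker history` output of the built image.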
func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envDef := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s`, envKey, envDef)
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
outputTabs := strings.Split(out, "\n")[1]
if !strings.Contains(outputTabs, envDef) {
c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef)
}
}
func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
proxy := "HTTP_PROXY=http://user:[email protected]"
explicitProxyKey := "http_proxy"
explicitProxyVal := "http://user:[email protected]"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo "Testing Build Args!"`, envKey, explicitProxyKey)
buildImage := func(imgName string) string {
cli.BuildCmd(c, imgName,
cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com",
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
"--build-arg", proxy),
build.WithDockerfile(dockerfile),
)
return getIDByName(c, imgName)
}
origID := buildImage(imgName)
result := cli.DockerCmd(c, "history", "--no-trunc", imgName)
out := result.Stdout()
if strings.Contains(out, proxy) {
c.Fatalf("failed to exclude proxy settings from history!")
}
if strings.Contains(out, "https_proxy") {
c.Fatalf("failed to exclude proxy settings from history!")
}
result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)})
result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)})
cacheID := buildImage(imgName + "-two")
c.Assert(origID, checker.Equals, cacheID)
}
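// TestBuildBuildTimeArgCacheHit checks that rebuilding with the same --build-arg
// value hits the cache and produces the same image ID.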
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachehit"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
	newImgID := getIDByName(c, imgNameCache)
if newImgID != origImgID {
c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
}
}
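// TestBuildBuildTimeArgCacheMissExtraArg checks that supplying an additional
// --build-arg consumed by the Dockerfile invalidates the build cache.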
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
extraEnvKey := "foo1"
extraEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo $%s`, envKey, extraEnvKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal),
),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
newEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOverride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
RUN echo $%s
CMD echo $%s
`, envKey, envKey, envValOverride, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOverride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride)
}
}
// FIXME(vdemeester) might be useful to merge with the one above ?
func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOverride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ENV %s %s
ARG %s
RUN echo $%s
CMD echo $%s
`, envKey, envValOverride, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOverride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
imgName := "bldvarstest"
wdVar := "WDIR"
wdVal := "/tmp/"
addVar := "AFILE"
addVal := "addFile"
copyVar := "CFILE"
copyVal := "copyFile"
envVar := "foo"
envVal := "bar"
exposeVar := "EPORT"
exposeVal := "9999"
userVar := "USER"
userVal := "testUser"
volVar := "VOL"
volVal := "/testVol/"
	if testEnv.DaemonPlatform() == "windows" {
volVal = "C:\\testVol"
wdVal = "C:\\tmp"
}
buildImageSuccessfully(c, imgName,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal),
"--build-arg", fmt.Sprintf("%s=%s", addVar, addVal),
"--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal),
"--build-arg", fmt.Sprintf("%s=%s", envVar, envVal),
"--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal),
"--build-arg", fmt.Sprintf("%s=%s", userVar, userVal),
"--build-arg", fmt.Sprintf("%s=%s", volVar, volVal),
),
build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
ARG %s
WORKDIR ${%s}
ARG %s
ADD ${%s} testDir/
ARG %s
COPY $%s testDir/
ARG %s
ENV %s=${%s}
ARG %s
EXPOSE $%s
ARG %s
USER $%s
ARG %s
VOLUME ${%s}`,
wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar,
envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)),
build.WithFile(addVal, "some stuff"),
build.WithFile(copyVal, "some stuff"),
),
)
res := inspectField(c, imgName, "Config.WorkingDir")
c.Check(filepath.ToSlash(res), check.Equals, filepath.ToSlash(wdVal))
var resArr []string
inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr)
found := false
for _, v := range resArr {
if fmt.Sprintf("%s=%s", envVar, envVal) == v {
found = true
break
}
}
if !found {
c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v",
envVar, envVal, resArr)
}
var resMap map[string]interface{}
inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap)
if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok {
c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap)
}
res = inspectField(c, imgName, "Config.User")
if res != userVal {
c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res)
}
inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap)
if _, ok := resMap[volVal]; !ok {
c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
envKey := "foo"
envVal := "bar"
envKey1 := "foo1"
envValOverride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
ENV %s ${%s}
RUN echo $%s
CMD echo $%s`, envKey, envKey, envValOverride, envKey1, envKey, envKey1, envKey1)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOverride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
ARG %s
CMD echo $%s`, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), envVal) {
c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined())
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "HTTP_PROXY"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), envVal) {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOverride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s
ENV %s $%s
RUN echo $%s
CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOverride)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOverride) != 1 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
warnStr := "[Warning] One or more build-args"
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: warnStr,
})
}
func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
dockerfile := `FROM busybox
ARG FOO1=fromfile
ARG FOO2=fromfile
ARG FOO3=fromfile
ARG FOO4=fromfile
ARG FOO5
ARG FOO6
ARG FO10
RUN env
RUN [ "$FOO1" == "fromcmd" ]
RUN [ "$FOO2" == "" ]
RUN [ "$FOO3" == "fromenv" ]
RUN [ "$FOO4" == "fromfile" ]
RUN [ "$FOO5" == "fromcmd" ]
# The following should not exist at all in the env
RUN [ "$(env | grep FOO6)" == "" ]
RUN [ "$(env | grep FOO7)" == "" ]
RUN [ "$(env | grep FOO8)" == "" ]
RUN [ "$(env | grep FOO9)" == "" ]
RUN [ "$FO10" == "" ]
`
result := buildImage("testbuildtimeargenv",
cli.WithFlags(
"--build-arg", fmt.Sprintf("FOO1=fromcmd"),
"--build-arg", fmt.Sprintf("FOO2="),
"--build-arg", fmt.Sprintf("FOO3"), // set in env
"--build-arg", fmt.Sprintf("FOO4"), // not set in env
"--build-arg", fmt.Sprintf("FOO5=fromcmd"),
// FOO6 is not set at all
"--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning
"--build-arg", fmt.Sprintf("FOO8="), // should produce a warning
"--build-arg", fmt.Sprintf("FOO9"), // should produce a warning
"--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value
),
cli.WithEnvironmentVariables(append(os.Environ(),
"FOO1=fromenv",
"FOO2=fromenv",
"FOO3=fromenv")...),
build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
),
)
result.Assert(c, icmd.Success)
// Now check to make sure we got a warning msg about unused build-args
i := strings.Index(result.Combined(), "[Warning]")
if i < 0 {
c.Fatalf("Missing the build-arg warning in %q", result.Combined())
}
out := result.Combined()[i:] // "out" should contain just the warning message now
// These were specified on a --build-arg but no ARG was in the Dockerfile
c.Assert(out, checker.Contains, "FOO7")
c.Assert(out, checker.Contains, "FOO8")
c.Assert(out, checker.Contains, "FOO9")
}
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
envKey3 := "foo3"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=""
ARG %s=''
ARG %s="''"
ARG %s='""'
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
envKey2, envKey3)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=
ARG %s=""
ARG %s=''
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
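// TestBuildBuildTimeArgDefinitionWithNoEnvInjection checks that an ARG declared
// without a value is not injected into the environment of RUN instructions.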
func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN env`, envKey)
result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envKey) != 1 {
c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined())
}
}
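// TestBuildBuildTimeArgMultipleFrom checks that an ARG declared in one build
// stage is scoped to that stage and does not leak into later stages.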
func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
LABEL multifromtest=1
RUN env > /out
FROM busybox
ARG bar=def
RUN env > /out`
result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1")
parentID := strings.TrimSpace(result.Stdout())
result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out")
c.Assert(result.Stdout(), checker.Contains, "foo=abc")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo")
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `ARG tag=nosuchtag
FROM busybox:${tag}
LABEL multifromtest=1
RUN env > /out
FROM busybox:${tag}
ARG tag
RUN env > /out`
result := cli.BuildCmd(c, imgName,
build.WithDockerfile(dockerfile),
cli.WithFlags("--build-arg", fmt.Sprintf("tag=latest")))
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1")
parentID := strings.TrimSpace(result.Stdout())
result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "tag")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Contains, "tag=latest")
}
func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
FROM busybox
ARG bar
RUN env > /out`
result := cli.BuildCmd(c, imgName,
build.WithDockerfile(dockerfile),
cli.WithFlags("--build-arg", fmt.Sprintf("baz=abc")))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "[Warning]")
c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz")
}
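// TestBuildNoNamedVolume checks that a pre-populated named volume is not mounted
// during the build, so its files are not visible to RUN instructions.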
func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) {
volName := "testname:/foo"
if testEnv.DaemonPlatform() == "windows" {
volName = "testname:C:\\foo"
}
dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops")
dockerFile := `FROM busybox
VOLUME ` + volName + `
RUN ls /foo/oops
`
buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
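// TestBuildTagEvent checks that building a tagged image emits a 'tag' event in
// `docker events` for the resulting image.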
func (s *DockerSuite) TestBuildTagEvent(c *check.C) {
since := daemonUnixTime(c)
dockerFile := `FROM busybox
RUN echo events
`
buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile))
until := daemonUnixTime(c)
out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image")
events := strings.Split(strings.TrimSpace(out), "\n")
actions := eventActionsByIDAndType(c, events, "test:latest", "image")
var foundTag bool
for _, a := range actions {
if a == "tag" {
foundTag = true
break
}
}
c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out))
}
// #15780
func (s *DockerSuite) TestBuildMultipleTags(c *check.C) {
dockerfile := `
FROM busybox
MAINTAINER test-15780
`
buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile))
id1 := getIDByName(c, "tag1")
id2 := getIDByName(c, "tag2:v2")
c.Assert(id1, check.Equals, id2)
}
// #17290
func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY . ./`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
// warm up cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
// add new file to context, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink target`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined()
c.Assert(out, checker.Matches, "bar")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined()
c.Assert(out, checker.Matches, "baz")
}
func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink /`),
fakecontext.WithFiles(map[string]string{
"foo/abc": "bar",
"foo/def": "baz",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined()
c.Assert(out, checker.Matches, "barbaz")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined()
c.Assert(out, checker.Matches, "barbax")
}
// TestBuildSymlinkBasename tests that a file copied via a symlink takes its name
// from the symlink's basename, not from the symlink target.
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink /`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined()
c.Assert(out, checker.Matches, "bar")
}
// #17827
func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) {
name := "testbuildrootsource"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY / /data`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
// warm up cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
// change file, should invalidate cache
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
}
// #19375
func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) {
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
}
// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir
func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildworkdirwindowspath"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
RUN mkdir C:\\work
WORKDIR C:\\work
RUN if "%CD%" NEQ "C:\work" exit -1
`))
}
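// TestBuildLabel checks that a label passed via --label ends up in the image's
// Config.Labels.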
func (s *DockerSuite) TestBuildLabel(c *check.C) {
name := "testbuildlabel"
testLabel := "foo"
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
name := "testbuildlabel"
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"),
build.WithDockerfile("FROM busybox"))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
v, ok := labels["foo"]
if !ok {
c.Fatal("label `foo` not found in image")
}
c.Assert(v, checker.Equals, "bar")
}
func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
name := "testbuildlabelcachecommit"
testLabel := "foo"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) {
name := "testbuildlabelmultiple"
testLabels := map[string]string{
"foo": "bar",
"123": "456",
}
labelArgs := []string{}
for k, v := range testLabels {
labelArgs = append(labelArgs, "--label", k+"="+v)
}
buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
for k, v := range testLabels {
if x, ok := labels[k]; !ok || x != v {
c.Fatalf("label %s=%s not found in image", k, v)
}
}
}
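// TestBuildFromAuthenticatedRegistry checks that an image can be used as a build
// base (FROM) when it has to be pulled from a registry that requires
// authentication.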
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) {
dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
baseImage := privateRegistryURL + "/baseimage"
buildImageSuccessfully(c, baseImage, build.WithDockerfile(`
FROM busybox
ENV env1 val1
`))
dockerCmd(c, "push", baseImage)
dockerCmd(c, "rmi", baseImage)
buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(`
FROM %s
ENV env2 val2
`, baseImage)))
}
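// TestBuildWithExternalAuth checks that a build can pull its base image from a
// private registry when the credentials are provided by an external credential
// helper (credsStore) rather than stored in config.json.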
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
// make sure the image is pulled when building
dockerCmd(c, "rmi", repoName)
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "--config", tmp, "build", "-"},
Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)),
}).Assert(c, icmd.Success)
}
// Test cases in #22036
func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) {
// Command line option labels will always override
name := "scratchy"
expected := `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
name = "from"
expected = `{"foo":"from-dockerfile"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option label will override even via `FROM`
name = "new"
expected = `{"bar":"from-dockerfile2","foo":"new"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
name = "scratchy2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
// This time is for inherited images
name = "new2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with only `FROM`
name = "scratchy"
expected = `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with env var
name = "scratchz"
expected = `{"bar":"$PATH"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
// Test case for #22855
func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) {
name := "test-delete-committed-file"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo test > file
RUN test -e file
RUN rm file
RUN sh -c "! test -e file"`))
}
// #20083
func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) {
// TODO Windows: Figure out why this test is flakey on TP5. If you add
// something like RUN sleep 5, or even RUN ls /tmp after the ADD line,
// it is more reliable, but that's not a good fix.
testRequires(c, DaemonIsLinux)
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(ls -la /tmp/#1)"
RUN sh -c "(! ls -la /tmp/#2)"
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("foo", "foo"),
build.WithFile("foo2", "foo2"),
build.WithFile("dir1/foo", "foo in dir1"),
build.WithFile("#1", "# file 1"),
build.WithFile("#2", "# file 2"),
build.WithFile(".dockerignore", `# Visual C++ cache files
# because we have git ;-)
# The above comment is from #20083
foo
#dir1/foo
foo2
# The following is considered as comment as # is at the beginning
#1
# The following is not considered as comment as # is not at the beginning
#2
`)))
}
// Test case for #23221
func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) {
name := "test-with-utf8-bom"
dockerfile := []byte(`FROM busybox`)
bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...)
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", string(bomDockerfile)),
))
}
// Test case for UTF-8 BOM in .dockerignore, related to #23221
func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) {
name := "test-with-utf8-bom-dockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls -la /tmp
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
dockerignore := []byte("./Dockerfile\n")
bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...)
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", string(bomDockerignore)),
))
}
// #22489 Shell test to confirm config gets updated correctly
func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) {
name := "testbuildshellupdatesconfig"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["foo", "-bar"]`))
expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]`
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
if res != expected {
c.Fatalf("%s, expected %s", res, expected)
}
res = inspectFieldJSON(c, name, "ContainerConfig.Shell")
if res != `["foo","-bar"]` {
c.Fatalf(`%s, expected ["foo","-bar"]`, res)
}
}
// #22489 Changing the shell multiple times and CMD after.
func (s *DockerSuite) TestBuildShellMultiple(c *check.C) {
name := "testbuildshellmultiple"
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo defaultshell
SHELL ["echo"]
RUN echoshell
SHELL ["ls"]
RUN -l
CMD -l`))
result.Assert(c, icmd.Success)
// Must contain 'defaultshell' twice
if len(strings.Split(result.Combined(), "defaultshell")) != 3 {
c.Fatalf("defaultshell should have appeared twice in %s", result.Combined())
}
// Must contain 'echoshell' twice
if len(strings.Split(result.Combined(), "echoshell")) != 3 {
c.Fatalf("echoshell should have appeared twice in %s", result.Combined())
}
// Must contain "total " (part of ls -l)
if !strings.Contains(result.Combined(), "total ") {
c.Fatalf("%s should have contained 'total '", result.Combined())
}
// A container started from the image uses the shell-form CMD.
// Last shell is ls. CMD is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489. Changed SHELL with ENTRYPOINT
func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) {
name := "testbuildshellentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
SHELL ["ls"]
ENTRYPOINT -l`))
// A container started from the image uses the shell-form ENTRYPOINT.
// Shell is ls. ENTRYPOINT is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489 Shell test to confirm shell is inherited in a subsequent build
func (s *DockerSuite) TestBuildShellInherited(c *check.C) {
name1 := "testbuildshellinherited1"
buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox
SHELL ["ls"]`))
name2 := "testbuildshellinherited2"
buildImage(name2, build.WithDockerfile(`FROM `+name1+`
RUN -l`)).Assert(c, icmd.Expected{
// ls -l has "total " followed by some number in it, ls without -l does not.
Out: "total ",
})
}
// #22489 Shell test to confirm non-JSON doesn't work
func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) {
name := "testbuildshellnotjson"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
sHeLl exec -form`, // Casing explicit to ensure error is upper-cased.
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "SHELL requires the arguments to be in JSON form",
})
}
// #22489 Windows shell test to confirm native is powershell if executing a PS command
// This would error if the default shell were still cmd.
func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildshellpowershell"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["powershell", "-command"]
RUN Write-Host John`)).Assert(c, icmd.Expected{
Out: "\nJohn\n",
})
}
// Verify that escape is being correctly applied to words when escape directive is not \.
// Tests WORKDIR, ADD
func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildescapenotbackslashwordtesta"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
WORKDIR c:\windows
RUN dir /w`)).Assert(c, icmd.Expected{
Out: "[System32]",
})
name = "testbuildescapenotbackslashwordtestb"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
SHELL ["powershell.exe"]
WORKDIR c:\foo
ADD Dockerfile c:\foo\
RUN dir Dockerfile`)).Assert(c, icmd.Expected{
Out: "-a----",
})
}
// #22868. Make sure shell-form CMD is marked as escaped in the config of the image
func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildcmdshellescaped"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
CMD "ipconfig"
`))
res := inspectFieldJSON(c, name, "Config.ArgsEscaped")
if res != "true" {
c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res)
}
dockerCmd(c, "run", "--name", "inspectme", name)
dockerCmd(c, "wait", "inspectme")
res = inspectFieldJSON(c, name, "Config.Cmd")
if res != `["cmd","/S","/C","\"ipconfig\""]` {
c.Fatalf("CMD was not escaped Config.Cmd: got %v", res)
}
}
// Test case for #24912.
func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) {
name := "testbuildstepswithprogress"
totalRun := 5
result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun)))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun))
for i := 2; i <= 1+totalRun; i++ {
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun))
}
}
func (s *DockerSuite) TestBuildWithFailure(c *check.C) {
name := "testbuildwithfailure"
	// First test case can only detect `nobody` at runtime, so all steps will show up
dockerfile := "FROM busybox\nRUN nobody"
result := buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody")
// Second test case `FFOM` should have been detected before build runs so no steps
dockerfile = "FFOM nobody\nRUN nobody"
result = buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody")
}
func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) {
dockerfile := `
FROM busybox
RUN echo "test"
ENTRYPOINT ["sh"]`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
func (s *DockerSuite) TestBuildCacheFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch bax`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
cli.DockerCmd(c, "rmi", "build2")
// no cache match with unknown source
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0)
cli.DockerCmd(c, "rmi", "build2")
// clear parent images
tempDir, err := ioutil.TempDir("", "test-build-cache-from-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, "img.tar")
cli.DockerCmd(c, "save", "-o", tempFile, "build1")
cli.DockerCmd(c, "rmi", "build1")
cli.DockerCmd(c, "load", "-i", tempFile)
parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined()
c.Assert(strings.TrimSpace(parentID), checker.Equals, "")
// cache still applies without parents
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined()
// Retry, no new intermediate images
result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, "build3")
c.Assert(id1, checker.Equals, id3)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined()
c.Assert(history1, checker.Equals, history2)
cli.DockerCmd(c, "rmi", "build2")
cli.DockerCmd(c, "rmi", "build3")
cli.DockerCmd(c, "rmi", "build1")
cli.DockerCmd(c, "load", "-i", tempFile)
// Modify file, everything up to last command and layers are reused
dockerfile = `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch newfile`
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644)
c.Assert(err, checker.IsNil)
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1").Combined()
layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined()
var layers1 []string
var layers2 []string
c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil)
c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil)
c.Assert(len(layers1), checker.Equals, len(layers2))
for i := 0; i < len(layers1)-1; i++ {
c.Assert(layers1[i], checker.Equals, layers2[i])
}
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ADD baz /
FROM busybox
ADD baz /`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
}))
defer ctx.Close()
result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
// second part of dockerfile was a repeat of first so should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
// now both parts of dockerfile should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
func (s *DockerSuite) TestBuildNetNone(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildnetnone"
buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(`
FROM busybox
RUN ping -c 1 8.8.8.8
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Out: "unreachable",
})
}
func (s *DockerSuite) TestBuildNetContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname")
name := "testbuildnetcontainer"
buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)),
build.WithDockerfile(`
FROM busybox
RUN nc localhost 1234 > /otherhost
`))
host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost")
c.Assert(strings.TrimSpace(host), check.Equals, "foobar")
}
func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildwithextrahost"
buildImageSuccessfully(c, name,
cli.WithFlags(
"--add-host", "foo:127.0.0.1",
"--add-host", "bar:127.0.0.1",
),
build.WithDockerfile(`
FROM busybox
RUN ping -c 1 foo
RUN ping -c 1 bar
`))
}
func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
FROM busybox
RUN ping -c 1 foo`
testCases := []struct {
testName string
dockerfile string
buildFlag string
}{
{"extra_host_missing_ip", dockerfile, "--add-host=foo"},
{"extra_host_missing_ip_with_delimiter", dockerfile, "--add-host=foo:"},
{"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"},
{"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"},
{"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"},
}
for _, tc := range testCases {
result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 125,
})
}
}
func (s *DockerSuite) TestBuildSquashParent(c *check.C) {
testRequires(c, ExperimentalDaemon)
dockerFile := `
FROM busybox
RUN echo hello > /hello
RUN echo world >> /hello
RUN echo hello > /remove_me
ENV HELLO world
RUN rm /remove_me
`
// build and get the ID that we can use later for history comparison
name := "test"
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
origID := getIDByName(c, name)
// build with squash
buildImageSuccessfully(c, name, cli.WithFlags("--squash"), build.WithDockerfile(dockerFile))
id := getIDByName(c, name)
out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello")
c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`)
// make sure the ID produced is the ID of the tag we specified
inspectID := inspectImage(c, "test", ".ID")
c.Assert(inspectID, checker.Equals, id)
origHistory, _ := dockerCmd(c, "history", origID)
testHistory, _ := dockerCmd(c, "history", "test")
splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n")
splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n")
c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1)
out = inspectImage(c, id, "len .RootFS.Layers")
c.Assert(strings.TrimSpace(out), checker.Equals, "2")
}
func (s *DockerSuite) TestBuildContChar(c *check.C) {
name := "testbuildcontchar"
buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{
Out: "Step 1/1 : FROM busybox",
})
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
dockerfile := `
FROM busybox AS first
COPY foo bar
FROM busybox
%s
COPY baz baz
RUN echo mno > baz/cc
FROM busybox
COPY bar /
COPY --from=1 baz sub/
COPY --from=0 bar baz
COPY --from=first bar bay`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
"bar": "def",
"baz/aa": "ghi",
"baz/bb": "jkl",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
cli.DockerCmd(c, "run", "build1", "cat", "bar").Assert(c, icmd.Expected{Out: "def"})
cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Assert(c, icmd.Expected{Out: "ghi"})
cli.DockerCmd(c, "run", "build1", "cat", "sub/cc").Assert(c, icmd.Expected{Out: "mno"})
cli.DockerCmd(c, "run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"})
cli.DockerCmd(c, "run", "build1", "cat", "bay").Assert(c, icmd.Expected{Out: "abc"})
result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
// all commands should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7)
c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2"))
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should not affect last block
result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx))
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should affect both first and last block
result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx))
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
cli.DockerCmd(c, "run", "build4", "cat", "bay").Assert(c, icmd.Expected{Out: "pqr"})
cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"})
}
func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
testCases := []struct {
dockerfile string
expectedError string
}{
{
dockerfile: `
FROM busybox
COPY --from=foo foo bar`,
expectedError: "invalid from flag value foo",
},
{
dockerfile: `
FROM busybox
COPY --from=0 foo bar`,
expectedError: "invalid from flag value 0: refers to current build stage",
},
{
dockerfile: `
FROM busybox AS foo
COPY --from=bar foo bar`,
expectedError: "invalid from flag value bar",
},
{
dockerfile: `
FROM busybox AS 1
COPY --from=1 foo bar`,
expectedError: "invalid name for build stage",
},
}
for _, tc := range testCases {
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(tc.dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: tc.expectedError,
})
ctx.Close()
}
}
func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
dockerfile = `
FROM build1:latest AS foo
FROM busybox
COPY --from=foo bar /
COPY foo /`
ctx = fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "def",
}))
defer ctx.Close()
cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=busybox /etc/passwd /mypasswd
RUN cmp /etc/passwd /mypasswd`
if DaemonIsWindows() {
dockerfile = `
FROM busybox
COPY --from=busybox License.txt foo`
}
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
)
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
if DaemonIsWindows() {
out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined()
c.Assert(len(out), checker.GreaterThan, 10)
out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined()
c.Assert(out, check.Equals, out2)
}
}
func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL)
dockerfile := `
FROM busybox
COPY foo bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx))
cli.DockerCmd(c, "push", repoName)
cli.DockerCmd(c, "rmi", repoName)
dockerfile = `
FROM busybox
COPY --from=%s bar baz`
ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName)))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"})
}
func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) {
dockerfile := `
FROM busybox as foo
COPY foo /
FROM foo as foo1
RUN echo 1 >> foo
FROM foo as foO2
RUN echo 2 >> foo
FROM foo
COPY --from=foo1 foo f1
COPY --from=FOo2 foo f2
	` // foo2 case also tests that names are case insensitive
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"})
cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"})
cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"})
}
func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) {
img1 := s.setupTrustedImage(c, "trusted-build1")
img2 := s.setupTrustedImage(c, "trusted-build2")
dockerFile := fmt.Sprintf(`
FROM %s AS build-base
RUN echo ok > /foo
FROM %s
COPY --from=build-base foo bar`, img1, img2)
name := "testcopyfromtrustedbuild"
r := buildImage(name, trustedBuild, build.WithDockerfile(dockerFile))
r.Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", img1[:len(img1)-7]),
})
r.Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", img2[:len(img2)-7]),
})
dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"})
}
func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
COPY foo c:\\bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
dockerfile = `
FROM build1:latest
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 c:\\bar /
COPY foo /`
ctx = fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "def",
}))
defer ctx.Close()
cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 %s c:\\oscopy
`
exp := icmd.Expected{
ExitCode: 1,
Err: "copy from c:\\ or c:\\windows is not allowed on windows",
}
buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "C:\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\windows"))).Assert(c, exp)
buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\wInDows"))).Assert(c, exp)
}
func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 %s c:\\oscopy
`
exp := icmd.Expected{
ExitCode: 1,
Err: "copy from c:\\ or c:\\windows is not allowed on windows",
}
buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:"))).Assert(c, exp)
buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "."))).Assert(c, exp)
buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "..\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, ".\\\\windows"))).Assert(c, exp)
buildImage("testforbidsystempaths5", build.WithDockerfile(fmt.Sprintf(dockerfile, "\\\\windows"))).Assert(c, exp)
}
func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
COPY foo /
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 c:\\fOo c:\\copied
RUN type c:\\copied
`
cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("foo", "hello world"),
)).Assert(c, icmd.Expected{
ExitCode: 0,
Out: "hello world",
})
}
// #33176
func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
FROM busybox
WORKDIR /foo/bar
FROM scratch
ENV FOO=bar
`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
)
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined()
c.Assert(strings.TrimSpace(res), checker.Equals, "")
}
func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) {
dockerfile := `
FROM busybox AS build-env
CMD ["/dev"]
FROM busybox
CMD ["/dist"]
`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx),
cli.WithFlags("--target", "build-env"))
//res := inspectFieldJSON(c, "build1", "Config.Cmd")
res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined()
c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`)
result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx),
cli.WithFlags("--target", "nosuchtarget"))
result.Assert(c, icmd.Expected{
ExitCode: 1,
Err: "failed to reach build target",
})
}
// TestBuildOpaqueDirectory tests that a build succeeds which
// creates opaque directories.
// See https://github.com/docker/docker/issues/25244
func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerFile := `
FROM busybox
RUN mkdir /dir1 && touch /dir1/f1
RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2
RUN touch /dir1/f3
RUN [ -f /dir1/f2 ]
`
	// Test that the build succeeds; the last command fails if the opaque directory
	// was not handled correctly
buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile))
}
// Windows test for USER in dockerfile
func (s *DockerSuite) TestBuildWindowsUser(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsuser"
buildImage(name, build.WithDockerfile(`FROM `+testEnv.MinimalBaseImage()+`
RUN net user user /add
USER user
RUN set username
`)).Assert(c, icmd.Expected{
Out: "USERNAME=user",
})
}
// Verifies if COPY file . when WORKDIR is set to a non-existing directory,
// the directory is created and the file is copied into the directory,
// as opposed to the file being copied as a file with the name of the
// directory. Fix for 27545 (found on Windows, but the regression test is good for Linux too).
// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514.
func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) {
name := "testbuildcopyfiledotwithworkdir"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
WORKDIR /foo
COPY file .
RUN ["cat", "/foo/file"]
`),
build.WithFile("file", "content"),
))
}
// Case-insensitive environment variables on Windows
func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsenvcaseinsensitive"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
ENV FOO=bar foo=baz
`))
res := inspectFieldJSON(c, name, "Config.Env")
if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped.
c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res)
}
}
// Test case for 29667
func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) {
image := "testworkdirimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
`))
out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
// The Windows busybox image has a blank `cmd`
lookingFor := `["sh"]`
if testEnv.DaemonPlatform() == "windows" {
lookingFor = "null"
}
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
image = "testworkdirlabelimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
LABEL a=b
`))
out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
}
// Test case for 28902/28909
func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildworkdircmd"
dockerFile := `
FROM busybox
WORKDIR /
`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
result := buildImage(name, build.WithDockerfile(dockerFile))
result.Assert(c, icmd.Success)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) {
name := "test_build_line_error_onbuild"
buildImage(name, build.WithDockerfile(`FROM busybox
ONBUILD
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) {
name := "test_build_line_error_unknown_instruction"
cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
RUN echo hello
ERROR
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) {
name := "test_build_line_error_with_empty_lines"
cli.Docker(cli.Build(name), build.WithDockerfile(`
FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
CMD ["/bin/init"]
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) {
name := "test_build_line_error_with_comments"
cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox
# This will print hello world
# and then ba
RUN echo hello world
NOINSTRUCTION echo ba
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION",
})
}
// #31957
func (s *DockerSuite) TestBuildSetCommandWithDefinedShell(c *check.C) {
buildImageSuccessfully(c, "build1", build.WithDockerfile(`
FROM busybox
SHELL ["/bin/sh", "-c"]
`))
buildImageSuccessfully(c, "build2", build.WithDockerfile(`
FROM build1
CMD echo foo
`))
out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2")
c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`)
}
func (s *DockerSuite) TestBuildIidFile(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestBuildIidFile")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpIidFile := filepath.Join(tmpDir, "iid")
name := "testbuildiidfile"
// Use a Dockerfile with multiple stages to ensure we get the last one
cli.BuildCmd(c, name,
build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1
ENV FOO FOO
FROM `+minimalBaseImage()+`
ENV BAR BAZ`),
cli.WithFlags("--iidfile", tmpIidFile))
id, err := ioutil.ReadFile(tmpIidFile)
c.Assert(err, check.IsNil)
d, err := digest.Parse(string(id))
c.Assert(err, check.IsNil)
c.Assert(d.String(), checker.Equals, getIDByName(c, name))
}
func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpIidFile := filepath.Join(tmpDir, "iid")
err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666)
c.Assert(err, check.IsNil)
cli.Docker(cli.Build("testbuildiidfilecleanuponfail"),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
RUN /non/existing/command`),
cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
_, err = os.Stat(tmpIidFile)
c.Assert(err, check.NotNil)
c.Assert(os.IsNotExist(err), check.Equals, true)
}
func (s *DockerSuite) TestBuildIidFileSquash(c *check.C) {
testRequires(c, ExperimentalDaemon)
tmpDir, err := ioutil.TempDir("", "TestBuildIidFileSquash")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpIidFile := filepath.Join(tmpDir, "iidsquash")
name := "testbuildiidfilesquash"
// Use a Dockerfile with multiple stages to ensure we get the last one
cli.BuildCmd(c, name,
		// This could be minimalBaseImage except that
		// https://github.com/moby/moby/issues/33823 requires
		// `touch` as a workaround.
build.WithDockerfile(`FROM busybox
ENV FOO FOO
ENV BAR BAR
RUN touch /foop
`),
cli.WithFlags("--iidfile", tmpIidFile, "--squash"))
id, err := ioutil.ReadFile(tmpIidFile)
c.Assert(err, check.IsNil)
d, err := digest.Parse(string(id))
c.Assert(err, check.IsNil)
c.Assert(d.String(), checker.Equals, getIDByName(c, name))
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
LavalinkClient/src/test/java/lavalink/client/LavalinkTest.java
|
/*
* Copyright (c) 2017 Frederik Ar. Mikkelsen & NoobLance
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package lavalink.client;
import com.mashape.unirest.http.Unirest;
import com.mashape.unirest.http.exceptions.UnirestException;
import com.sedmelluq.discord.lavaplayer.track.AudioTrack;
import com.sedmelluq.discord.lavaplayer.track.AudioTrackEndReason;
import lavalink.client.io.Lavalink;
import lavalink.client.io.Link;
import lavalink.client.player.IPlayer;
import lavalink.client.player.event.PlayerEventListenerAdapter;
import net.dv8tion.jda.core.AccountType;
import net.dv8tion.jda.core.JDA;
import net.dv8tion.jda.core.JDABuilder;
import net.dv8tion.jda.core.entities.VoiceChannel;
import org.json.JSONArray;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
class LavalinkTest {
private static final Logger log = LoggerFactory.getLogger(LavalinkTest.class);
private static JDA jda = null;
private static Lavalink lavalink = null;
private static final String[] BILL_WURTZ_JINGLES = {
"https://www.youtube.com/watch?v=GtwVQbUSasw",
"https://www.youtube.com/watch?v=eNxMkZcySKs",
"https://www.youtube.com/watch?v=4q1Zs3vbX8M",
"https://www.youtube.com/watch?v=sqPTS16mi9M",
"https://www.youtube.com/watch?v=dWqPb16Ox-0",
"https://www.youtube.com/watch?v=mxyPtMON4IM",
"https://www.youtube.com/watch?v=DLutlHlw4C0"
};
@BeforeAll
static void setUp() {
try {
lavalink = new Lavalink("152691313123393536", 1, integer -> jda);
jda = new JDABuilder(AccountType.BOT)
.setToken(System.getenv("TEST_TOKEN"))
.addEventListener(lavalink)
.buildBlocking();
lavalink.addNode(new URI("ws://localhost"), "youshallnotpass");
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@AfterAll
static void tearDown() {
lavalink.shutdown();
jda.shutdown();
}
@Test
void vcJoinTest() {
VoiceChannel vc = jda.getVoiceChannelById(System.getenv("TEST_VOICE_CHANNEL"));
lavalink.openVoiceConnection(vc);
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
lavalink.closeVoiceConnection(vc.getGuild());
}
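// Helper: asks the local Lavalink node's /loadtracks REST endpoint for tracks
// matching the identifier and decodes each encoded track into an AudioTrack.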
private List<AudioTrack> loadAudioTracks(String identifier) {
try {
JSONArray trackData = Unirest.get("http://localhost:2333/loadtracks?identifier=" + URLEncoder.encode(identifier, "UTF-8"))
.header("Authorization", "youshallnotpass")
.asJson()
.getBody()
.getObject()
.getJSONArray("tracks");
ArrayList<AudioTrack> list = new ArrayList<>();
trackData.forEach(o -> {
try {
list.add(LavalinkUtil.toAudioTrack((String) o));
} catch (IOException e) {
throw new RuntimeException(e);
}
});
return list;
} catch (UnirestException | IOException e) {
throw new RuntimeException(e);
}
}
private void connectAndPlay(AudioTrack track) throws InterruptedException {
VoiceChannel vc = jda.getVoiceChannelById(System.getenv("TEST_VOICE_CHANNEL"));
lavalink.openVoiceConnection(vc);
IPlayer player = lavalink.getPlayer(vc.getGuild().getId());
CountDownLatch latch = new CountDownLatch(1);
PlayerEventListenerAdapter listener = new PlayerEventListenerAdapter() {
@Override
public void onTrackStart(IPlayer player, AudioTrack track) {
latch.countDown();
}
};
player.addListener(listener);
player.playTrack(track);
latch.await(5, TimeUnit.SECONDS);
lavalink.closeVoiceConnection(vc.getGuild());
player.removeListener(listener);
player.stopTrack();
Assertions.assertEquals(0, latch.getCount());
}
@Test
void vcPlayTest() throws InterruptedException {
connectAndPlay(loadAudioTracks("aGOFOP2BIhI").get(0));
}
@Test
void vcStreamTest() throws InterruptedException {
connectAndPlay(loadAudioTracks("https://gensokyoradio.net/GensokyoRadio.m3u").get(0));
}
@Test
void stopTest() throws InterruptedException {
VoiceChannel vc = jda.getVoiceChannelById(System.getenv("TEST_VOICE_CHANNEL"));
lavalink.openVoiceConnection(vc);
IPlayer player = lavalink.getPlayer(vc.getGuild().getId());
CountDownLatch latch = new CountDownLatch(1);
PlayerEventListenerAdapter listener = new PlayerEventListenerAdapter() {
@Override
public void onTrackStart(IPlayer player, AudioTrack track) {
player.stopTrack();
}
@Override
public void onTrackEnd(IPlayer player, AudioTrack track, AudioTrackEndReason endReason) {
if (endReason == AudioTrackEndReason.STOPPED) {
latch.countDown();
}
}
};
player.addListener(listener);
player.playTrack(loadAudioTracks("aGOFOP2BIhI").get(0));
latch.await(5, TimeUnit.SECONDS);
lavalink.closeVoiceConnection(vc.getGuild());
player.removeListener(listener);
player.stopTrack();
Assertions.assertEquals(0, latch.getCount());
}
@Test
void testPlayback() throws InterruptedException {
VoiceChannel vc = jda.getVoiceChannelById(System.getenv("TEST_VOICE_CHANNEL"));
Link link = lavalink.getLink(vc.getGuild());
link.connect(vc);
IPlayer player = link.getPlayer();
CountDownLatch latch = new CountDownLatch(1);
PlayerEventListenerAdapter listener = new PlayerEventListenerAdapter() {
@Override
public void onTrackEnd(IPlayer player, AudioTrack track, AudioTrackEndReason endReason) {
if (endReason == AudioTrackEndReason.FINISHED) {
latch.countDown();
}
}
};
player.addListener(listener);
String jingle = BILL_WURTZ_JINGLES[(int) (Math.random() * BILL_WURTZ_JINGLES.length)];
player.playTrack(loadAudioTracks(jingle).get(0));
latch.await(20, TimeUnit.SECONDS);
link.disconnect();
player.removeListener(listener);
player.stopTrack();
Assertions.assertEquals(0, latch.getCount());
}
}
|
[
"\"TEST_TOKEN\"",
"\"TEST_VOICE_CHANNEL\"",
"\"TEST_VOICE_CHANNEL\"",
"\"TEST_VOICE_CHANNEL\"",
"\"TEST_VOICE_CHANNEL\""
] |
[] |
[
"TEST_VOICE_CHANNEL",
"TEST_TOKEN"
] |
[]
|
["TEST_VOICE_CHANNEL", "TEST_TOKEN"]
|
java
| 2 | 0 | |
pkg/airgap/update.go
|
package airgap
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
kotsv1beta1 "github.com/replicatedhq/kots/kotskinds/apis/kots/v1beta1"
apptypes "github.com/replicatedhq/kots/pkg/app/types"
"github.com/replicatedhq/kots/pkg/crypto"
"github.com/replicatedhq/kots/pkg/cursor"
identity "github.com/replicatedhq/kots/pkg/kotsadmidentity"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/logger"
"github.com/replicatedhq/kots/pkg/preflight"
"github.com/replicatedhq/kots/pkg/pull"
"github.com/replicatedhq/kots/pkg/store"
"github.com/replicatedhq/kots/pkg/util"
"github.com/replicatedhq/kots/pkg/version"
)
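// StartUpdateTaskMonitor keeps the "update-download" task status timestamp fresh
// while an update is running, then records the outcome once finishedChan fires:
// the status is cleared on success and marked "failed" with the error otherwise.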
func StartUpdateTaskMonitor(finishedChan <-chan error) {
go func() {
var finalError error
defer func() {
if finalError == nil {
if err := store.GetStore().ClearTaskStatus("update-download"); err != nil {
logger.Error(errors.Wrap(err, "failed to clear update-download task status"))
}
} else {
if err := store.GetStore().SetTaskStatus("update-download", finalError.Error(), "failed"); err != nil {
logger.Error(errors.Wrap(err, "failed to set error on update-download task status"))
}
}
}()
for {
select {
case <-time.After(time.Second):
if err := store.GetStore().UpdateTaskStatusTimestamp("update-download"); err != nil {
logger.Error(err)
}
case err := <-finishedChan:
finalError = err
return
}
}
}()
}
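// UpdateAppFromAirgap extracts the app metadata from the airgap bundle into a
// temporary directory and updates the app from that extracted root, reporting
// progress through the "update-download" task status.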
func UpdateAppFromAirgap(a *apptypes.App, airgapBundlePath string, deploy bool, skipPreflights bool) (finalError error) {
finishedChan := make(chan error)
defer close(finishedChan)
StartUpdateTaskMonitor(finishedChan)
defer func() {
finishedChan <- finalError
}()
if err := store.GetStore().SetTaskStatus("update-download", "Extracting files...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
airgapRoot, err := extractAppMetaFromAirgapBundle(airgapBundlePath)
if err != nil {
return errors.Wrap(err, "failed to extract archive")
}
defer os.RemoveAll(airgapRoot)
err = UpdateAppFromPath(a, airgapRoot, airgapBundlePath, deploy, skipPreflights)
return errors.Wrap(err, "failed to update app")
}
func UpdateAppFromPath(a *apptypes.App, airgapRoot string, airgapBundlePath string, deploy bool, skipPreflights bool) error {
if err := store.GetStore().SetTaskStatus("update-download", "Processing package...", "running"); err != nil {
return errors.Wrap(err, "failed to set tasks status")
}
registrySettings, err := store.GetStore().GetRegistryDetailsForApp(a.ID)
if err != nil {
return errors.Wrap(err, "failed to get app registry settings")
}
currentArchivePath, err := ioutil.TempDir("", "kotsadm")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(currentArchivePath)
err = store.GetStore().GetAppVersionArchive(a.ID, a.CurrentSequence, currentArchivePath)
if err != nil {
return errors.Wrap(err, "failed to get current archive")
}
beforeKotsKinds, err := kotsutil.LoadKotsKindsFromPath(currentArchivePath)
if err != nil {
return errors.Wrap(err, "failed to load current kotskinds")
}
if beforeKotsKinds.License == nil {
err := errors.New("no license found in application")
return err
}
if err := store.GetStore().SetTaskStatus("update-download", "Processing app package...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
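// Render into the pod's own namespace by default; KOTSADM_TARGET_NAMESPACE
// overrides it when set.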
appNamespace := os.Getenv("POD_NAMESPACE")
if os.Getenv("KOTSADM_TARGET_NAMESPACE") != "" {
appNamespace = os.Getenv("KOTSADM_TARGET_NAMESPACE")
}
if err := store.GetStore().SetTaskStatus("update-download", "Creating app version...", "running"); err != nil {
return errors.Wrap(err, "failed to set task status")
}
appSequence, err := version.GetNextAppSequence(a.ID, &a.CurrentSequence)
if err != nil {
return errors.Wrap(err, "failed to get new app sequence")
}
pipeReader, pipeWriter := io.Pipe()
go func() {
scanner := bufio.NewScanner(pipeReader)
for scanner.Scan() {
if err := store.GetStore().SetTaskStatus("update-download", scanner.Text(), "running"); err != nil {
logger.Error(err)
}
}
pipeReader.CloseWithError(scanner.Err())
}()
// Using license from db instead of upstream bundle because the one in db has not been re-marshalled
license, err := pull.ParseLicenseFromBytes([]byte(a.License))
if err != nil {
return errors.Wrap(err, "failed parse license")
}
identityConfigFile := filepath.Join(currentArchivePath, "upstream", "userdata", "identityconfig.yaml")
if _, err := os.Stat(identityConfigFile); os.IsNotExist(err) {
file, err := identity.InitAppIdentityConfig(a.Slug, kotsv1beta1.Storage{}, crypto.AESCipher{})
if err != nil {
return errors.Wrap(err, "failed to init identity config")
}
identityConfigFile = file
defer os.Remove(identityConfigFile)
} else if err != nil {
return errors.Wrap(err, "failed to get stat identity config file")
}
pullOptions := pull.PullOptions{
LicenseObj: license,
Namespace: appNamespace,
ConfigFile: filepath.Join(currentArchivePath, "upstream", "userdata", "config.yaml"),
IdentityConfigFile: identityConfigFile,
AirgapRoot: airgapRoot,
AirgapBundle: airgapBundlePath,
InstallationFile: filepath.Join(currentArchivePath, "upstream", "userdata", "installation.yaml"),
UpdateCursor: beforeKotsKinds.Installation.Spec.UpdateCursor,
RootDir: currentArchivePath,
ExcludeKotsKinds: true,
ExcludeAdminConsole: true,
CreateAppDir: false,
ReportWriter: pipeWriter,
Silent: true,
RewriteImages: true,
RewriteImageOptions: pull.RewriteImageOptions{
ImageFiles: filepath.Join(airgapRoot, "images"),
Host: registrySettings.Hostname,
Namespace: registrySettings.Namespace,
Username: registrySettings.Username,
Password: registrySettings.Password,
},
AppSlug: a.Slug,
AppSequence: appSequence,
}
if _, err := pull.Pull(fmt.Sprintf("replicated://%s", beforeKotsKinds.License.Spec.AppSlug), pullOptions); err != nil {
return errors.Wrap(err, "failed to pull")
}
afterKotsKinds, err := kotsutil.LoadKotsKindsFromPath(currentArchivePath)
if err != nil {
return errors.Wrap(err, "failed to read after kotskinds")
}
bc, err := cursor.NewCursor(beforeKotsKinds.Installation.Spec.UpdateCursor)
if err != nil {
return errors.Wrap(err, "failed to create bc")
}
ac, err := cursor.NewCursor(afterKotsKinds.Installation.Spec.UpdateCursor)
if err != nil {
return errors.Wrap(err, "failed to create ac")
}
if !bc.Comparable(ac) {
return errors.Errorf("cannot compare %q and %q", beforeKotsKinds.Installation.Spec.UpdateCursor, afterKotsKinds.Installation.Spec.UpdateCursor)
}
if bc.Equal(ac) {
return util.ActionableError{
NoRetry: true,
Message: fmt.Sprintf("Version %s (%s) cannot be installed again because it is already the current version", afterKotsKinds.Installation.Spec.VersionLabel, afterKotsKinds.Installation.Spec.UpdateCursor),
}
} else if bc.After(ac) {
return util.ActionableError{
NoRetry: true,
Message: fmt.Sprintf("Version %s (%s) cannot be installed because version %s (%s) is newer", afterKotsKinds.Installation.Spec.VersionLabel, afterKotsKinds.Installation.Spec.UpdateCursor, beforeKotsKinds.Installation.Spec.VersionLabel, beforeKotsKinds.Installation.Spec.UpdateCursor),
}
}
// Create the app in the db
newSequence, err := store.GetStore().CreateAppVersion(a.ID, &a.CurrentSequence, currentArchivePath, "Airgap Upload", skipPreflights, &version.DownstreamGitOps{})
if err != nil {
return errors.Wrap(err, "failed to create new version")
}
if !skipPreflights {
if err := preflight.Run(a.ID, a.Slug, newSequence, true, currentArchivePath); err != nil {
return errors.Wrap(err, "failed to start preflights")
}
}
if deploy {
err := version.DeployVersion(a.ID, newSequence)
if err != nil {
return errors.Wrap(err, "failed to deploy app version")
}
}
return nil
}
|
[
"\"POD_NAMESPACE\"",
"\"KOTSADM_TARGET_NAMESPACE\"",
"\"KOTSADM_TARGET_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE",
"KOTSADM_TARGET_NAMESPACE"
] |
[]
|
["POD_NAMESPACE", "KOTSADM_TARGET_NAMESPACE"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoTuto.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
environment/k8s_helm_resource.go
|
package environment
import (
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"time"
"github.com/rs/zerolog/log"
"github.com/smartcontractkit/integrations-framework/config"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/kube"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/portforward"
)
const (
// HelmInstallTimeout timeout for installing a helm chart
HelmInstallTimeout = 200 * time.Second
// ReleasePrefix the default prefix
ReleasePrefix = "release"
// DefaultK8sConfigPath the default path for kube
DefaultK8sConfigPath = ".kube/config"
)
// SetValuesHelmFunc interface for setting values in a helm chart
type SetValuesHelmFunc func(resource *HelmChart) error
// PodForwardedInfo data to port forward the pods
type PodForwardedInfo struct {
PodIP string
ForwardedPorts []portforward.ForwardedPort
PodName string
}
// HelmChart common helm chart data
type HelmChart struct {
id string
chartPath string
releaseName string
actionConfig *action.Configuration
env *K8sEnvironment
network *config.NetworkConfig
SetValuesHelmFunc SetValuesHelmFunc
// Deployment properties
pods []PodForwardedInfo
values map[string]interface{}
stopChannels []chan struct{}
}
// Teardown tears down the helm release
func (k *HelmChart) Teardown() error {
// closing forwarded ports
for _, stopChan := range k.stopChannels {
stopChan <- struct{}{}
}
log.Debug().Str("Release", k.releaseName).Msg("Uninstalling Helm release")
if _, err := action.NewUninstall(k.actionConfig).Run(k.releaseName); err != nil {
return err
}
return nil
}
// ID returns the helm chart id
func (k *HelmChart) ID() string {
return k.id
}
// SetValue sets the specified value in the chart
func (k *HelmChart) SetValue(key string, val interface{}) {
k.values[key] = val
}
// GetConfig gets the helms environment config
func (k *HelmChart) GetConfig() *config.Config {
return k.env.config
}
// Values returns the helm charts values
func (k *HelmChart) Values() map[string]interface{} {
return k.values
}
// SetEnvironment sets the environment
func (k *HelmChart) SetEnvironment(environment *K8sEnvironment) error {
k.env = environment
return nil
}
// Environment gets environment
func (k *HelmChart) Environment() *K8sEnvironment {
return k.env
}
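// forwardAllPodsPorts port-forwards every pod belonging to this Helm release and
// records the forwarded ports so ServiceDetails can expose local URLs for them.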
func (k *HelmChart) forwardAllPodsPorts() error {
k8sPods := k.env.k8sClient.CoreV1().Pods(k.env.namespace.Name)
pods, err := k8sPods.List(context.Background(), metaV1.ListOptions{
LabelSelector: k.releaseSelector(),
})
if err != nil {
return err
}
for _, p := range pods.Items {
ports, err := forwardPodPorts(&p, k.env.k8sConfig, k.env.namespace.Name, k.stopChannels)
if err != nil {
return fmt.Errorf("unable to forward ports: %v", err)
}
k.pods = append(k.pods, PodForwardedInfo{
PodIP: p.Status.PodIP,
ForwardedPorts: ports,
PodName: p.Name,
})
log.Info().Str("Manifest ID", k.id).Interface("Ports", ports).Msg("Forwarded ports")
}
return nil
}
// WaitUntilHealthy waits until the helm release is healthy
func (k *HelmChart) WaitUntilHealthy() error {
// helm's Wait option was used during install, so there is no need to wait for pods to be deployed here
if err := k.forwardAllPodsPorts(); err != nil {
return err
}
if k.values == nil {
k.values = make(map[string]interface{})
}
if k.SetValuesHelmFunc != nil {
if err := k.SetValuesHelmFunc(k); err != nil {
return err
}
}
return nil
}
func (k *HelmChart) releaseSelector() string {
return fmt.Sprintf("%s=%s", ReleasePrefix, k.releaseName)
}
// ServiceDetails gets the details of the released service
func (k *HelmChart) ServiceDetails() ([]*ServiceDetails, error) {
var serviceDetails []*ServiceDetails
for _, pod := range k.pods {
for _, port := range pod.ForwardedPorts {
remoteURL, err := url.Parse(fmt.Sprintf("http://%s:%d", pod.PodIP, port.Remote))
if err != nil {
return serviceDetails, err
}
localURL, err := url.Parse(fmt.Sprintf("http://127.0.0.1:%d", port.Local))
if err != nil {
return serviceDetails, err
}
serviceDetails = append(serviceDetails, &ServiceDetails{
RemoteURL: remoteURL,
LocalURL: localURL,
})
}
}
return serviceDetails, nil
}
// Deploy deploys the helm charts
func (k *HelmChart) Deploy(_ map[string]interface{}) error {
log.Info().Str("Path", k.chartPath).
Str("Release", k.releaseName).
Str("Namespace", k.env.namespace.Name).
Msg("Installing Helm chart")
chart, err := loader.Load(k.chartPath)
if err != nil {
return err
}
chart.Values, err = chartutil.CoalesceValues(chart, k.values)
if err != nil {
return err
}
homeDir, err := os.UserHomeDir()
if err != nil {
return err
}
k.actionConfig = &action.Configuration{}
// TODO: So, this is annoying, and not really all that important, I SHOULD be able to just use our K8sConfig function
// and pass that in as our config, but K8s has like 10 different config types, all of which don't talk to each other,
// and this wants an interface, instead of the rest config that we use everywhere else. Creating such an interface is
// also a huge hassle and... well anyway, if you've got some time to burn to make this more sensical, I hope you like
// digging into K8s code with sparse to no docs.
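// Resolve the kubeconfig path: prefer an explicit KUBECONFIG, otherwise fall
// back to ~/.kube/config.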
kubeConfigPath := filepath.Join(homeDir, DefaultK8sConfigPath)
if len(os.Getenv("KUBECONFIG")) > 0 {
kubeConfigPath = os.Getenv("KUBECONFIG")
}
if err := k.actionConfig.Init(
kube.GetConfig(kubeConfigPath, "", k.env.namespace.Name),
k.env.namespace.Name,
os.Getenv("HELM_DRIVER"),
func(format string, v ...interface{}) {
log.Debug().Str("LogType", "Helm").Msg(fmt.Sprintf(format, v...))
}); err != nil {
return err
}
install := action.NewInstall(k.actionConfig)
install.Namespace = k.env.namespace.Name
install.ReleaseName = k.releaseName
install.Timeout = HelmInstallTimeout
// blocks until all pods are healthy
install.Wait = true
_, err = install.Run(chart, nil)
if err != nil {
return err
}
log.Info().
Str("Namespace", k.env.namespace.Name).
Str("Release", k.releaseName).
Str("Chart", k.chartPath).
Msg("Succesfully installed helm chart")
return nil
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\"",
"\"HELM_DRIVER\""
] |
[] |
[
"HELM_DRIVER",
"KUBECONFIG"
] |
[]
|
["HELM_DRIVER", "KUBECONFIG"]
|
go
| 2 | 0 | |
tensorflow_transform/beam/analyzer_cache_test.py
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.beam.analyzer_cache."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# GOOGLE-INITIALIZATION
import apache_beam as beam
from apache_beam.testing import util as beam_test_util
import numpy as np
from tensorflow_transform import analyzer_nodes
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform import test_case
class AnalyzerCacheTest(test_case.TransformTestCase):
def test_validate_dataset_keys(self):
analyzer_cache.validate_dataset_keys(
{'foo', 'Foo', 'A1', 'A_1', 'A.1', 'A-1'})
for key in {'foo 1', 'foo@1', 'foo*', 'foo[]', 'foo/goo'}:
with self.assertRaisesRegexp(
ValueError, 'Dataset key .* does not match allowed pattern:'):
analyzer_cache.validate_dataset_keys({key})
@test_case.named_parameters(
dict(
testcase_name='JsonNumpyCacheCoder',
coder_cls=analyzer_nodes.JsonNumpyCacheCoder,
value=[1, 2.5, 3, '4']),
dict(
testcase_name='_VocabularyAccumulatorCoder',
coder_cls=analyzer_nodes._VocabularyAccumulatorCoder,
value=['A', 17]),
)
def test_coders_round_trip(self, coder_cls, value):
coder = coder_cls()
encoded = coder.encode_cache(value)
np.testing.assert_equal(value, coder.decode_cache(encoded))
def test_cache_helpers_round_trip(self):
base_test_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
with beam.Pipeline() as p:
cache_pcoll_dict = {
'dataset_key_0': {
'a': p | 'CreateA' >> beam.Create([b'[1, 2, 3]']),
'b': p | 'CreateB' >> beam.Create([b'[5]']),
},
'dataset_key_1': {
'c': p | 'CreateC' >> beam.Create([b'[9, 5, 2, 1]']),
},
}
_ = cache_pcoll_dict | analyzer_cache.WriteAnalysisCacheToFS(
base_test_dir)
with beam.Pipeline() as p:
read_cache = p | analyzer_cache.ReadAnalysisCacheFromFS(
base_test_dir, list(cache_pcoll_dict.keys()))
def assert_equal_matcher(expected_encoded):
def _assert_equal(encoded_cache_list):
(encode_cache,) = encoded_cache_list
self.assertEqual(expected_encoded, encode_cache)
return _assert_equal
beam_test_util.assert_that(
read_cache['dataset_key_0'][analyzer_cache.make_cache_entry_key('a')],
beam_test_util.equal_to([b'[1, 2, 3]']),
label='AssertA')
beam_test_util.assert_that(
read_cache['dataset_key_0'][analyzer_cache.make_cache_entry_key('b')],
assert_equal_matcher(b'[5]'),
label='AssertB')
beam_test_util.assert_that(
read_cache['dataset_key_1'][analyzer_cache.make_cache_entry_key('c')],
assert_equal_matcher(b'[9, 5, 2, 1]'),
label='AssertC')
if __name__ == '__main__':
test_case.main()
|
[] |
[] |
[
"TEST_UNDECLARED_OUTPUTS_DIR"
] |
[]
|
["TEST_UNDECLARED_OUTPUTS_DIR"]
|
python
| 1 | 0 | |
temp/ppo/minimal_ppo.py
|
import os
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import numpy as np
# Hyperparameters
learning_rate = 0.0003
gamma = 0.9
lmbda = 0.9
eps_clip = 0.2
K_epoch = 10
rollout_len = 3
buffer_size = 30
minibatch_size = 32
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
class PPO(nn.Module):
def __init__(self):
super(PPO, self).__init__()
self.data = []
self.fc1 = nn.Linear(3,128)
self.fc_mu = nn.Linear(128,1)
self.fc_std = nn.Linear(128,1)
self.fc_v = nn.Linear(128,1)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
self.optimization_step = 0
def pi(self, x, softmax_dim = 0):
x = F.relu(self.fc1(x))
mu = 2.0*torch.tanh(self.fc_mu(x))
std = F.softplus(self.fc_std(x))
return mu, std
def v(self, x):
x = F.relu(self.fc1(x))
v = self.fc_v(x)
return v
def put_data(self, transition):
self.data.append(transition)
def make_batch(self):
s_batch, a_batch, r_batch, s_prime_batch, prob_a_batch, done_batch = [], [], [], [], [], []
data = []
for j in range(buffer_size):
for i in range(minibatch_size):
rollout = self.data.pop()
s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []
for transition in rollout:
s, a, r, s_prime, prob_a, done = transition
s_lst.append(s)
a_lst.append([a])
r_lst.append([r])
s_prime_lst.append(s_prime)
prob_a_lst.append([prob_a])
done_mask = 0 if done else 1
done_lst.append([done_mask])
s_batch.append(s_lst)
a_batch.append(a_lst)
r_batch.append(r_lst)
s_prime_batch.append(s_prime_lst)
prob_a_batch.append(prob_a_lst)
done_batch.append(done_lst)
mini_batch = torch.tensor(s_batch, dtype=torch.float), torch.tensor(a_batch, dtype=torch.float), \
torch.tensor(r_batch, dtype=torch.float), torch.tensor(s_prime_batch, dtype=torch.float), \
torch.tensor(done_batch, dtype=torch.float), torch.tensor(prob_a_batch, dtype=torch.float)
data.append(mini_batch)
return data
def calc_advantage(self, data):
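        # Generalized Advantage Estimation: build TD targets from the value network, then accumulate gamma*lmbda-discounted deltas in reverse time order.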
data_with_adv = []
for mini_batch in data:
s, a, r, s_prime, done_mask, old_log_prob = mini_batch
with torch.no_grad():
td_target = r + gamma * self.v(s_prime) * done_mask
delta = td_target - self.v(s)
delta = delta.numpy()
advantage_lst = []
advantage = 0.0
for delta_t in delta[::-1]:
advantage = gamma * lmbda * advantage + delta_t[0]
advantage_lst.append([advantage])
advantage_lst.reverse()
advantage = torch.tensor(advantage_lst, dtype=torch.float)
data_with_adv.append((s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage))
return data_with_adv
def train_net(self):
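        # Train only once the rollout buffer is full: K_epoch passes of the PPO clipped-surrogate loss plus a smooth-L1 value loss, with gradient-norm clipping.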
if len(self.data) == minibatch_size * buffer_size:
data = self.make_batch()
data = self.calc_advantage(data)
for i in range(K_epoch):
for mini_batch in data:
s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage = mini_batch
mu, std = self.pi(s, softmax_dim=1)
dist = Normal(mu, std)
log_prob = dist.log_prob(a)
ratio = torch.exp(log_prob - old_log_prob) # a/b == exp(log(a)-log(b))
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1-eps_clip, 1+eps_clip) * advantage
loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s) , td_target)
self.optimizer.zero_grad()
loss.mean().backward()
nn.utils.clip_grad_norm_(self.parameters(), 1.0)
self.optimizer.step()
self.optimization_step += 1
def main():
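    # Collect fixed-length rollouts from Pendulum-v0, store them in the agent's buffer, and trigger training whenever enough data has accumulated.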
env = gym.make('Pendulum-v0')
model = PPO()
score = 0.0
print_interval = 20
rollout = []
for n_epi in range(10000):
s = env.reset()
done = False
while not done:
for t in range(rollout_len):
mu, std = model.pi(torch.from_numpy(s).float())
dist = Normal(mu, std)
a = dist.rsample()
log_prob = dist.log_prob(a)
s_prime, r, done, info = env.step([a.item()])
rollout.append((s, a, r/10.0, s_prime, log_prob.item(), done))
if len(rollout) == rollout_len:
model.put_data(rollout)
rollout = []
s = s_prime
score += r
if done:
break
model.train_net()
if n_epi%print_interval==0 and n_epi!=0:
print("# of episode :{}, avg score : {:.1f}, opt step: {}".format(n_epi, score/print_interval, model.optimization_step))
score = 0.0
env.close()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
qa/L0_infer/infer_test.py
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import os
from tritonclientutils import *
TEST_SYSTEM_SHARED_MEMORY = bool(
int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
TEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',
0)))
CPU_ONLY = (os.environ.get('TRITON_SERVER_CPU_ONLY') is not None)
USE_GRPC = (os.environ.get('USE_GRPC', 1) != "0")
USE_HTTP = (os.environ.get('USE_HTTP', 1) != "0")
assert USE_GRPC or USE_HTTP, "USE_GRPC or USE_HTTP must be non-zero"
BACKENDS = os.environ.get('BACKENDS',
"graphdef savedmodel onnx libtorch plan python")
ENSEMBLES = bool(int(os.environ.get('ENSEMBLES', 1)))
OS_WINDOWS = bool(int(os.environ.get('OS_WINDOWS', 0)))
np_dtype_string = np.dtype(object)
class InferTest(tu.TestResultCollector):
def _full_exact(self, input_dtype, output0_dtype, output1_dtype,
output0_raw, output1_raw, swap):
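        """Run infer_exact for this dtype combination against every configured backend (and ensemble prefix where enabled)."""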
def _infer_exact_helper(tester,
pf,
tensor_shape,
batch_size,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=True,
output1_raw=True,
model_version=None,
swap=False,
outputs=("OUTPUT0", "OUTPUT1"),
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_http_json_tensors=True,
skip_request_id_check=True,
use_streaming=True,
correlation_id=0):
for bs in (1, batch_size):
# model that does not support batching
if bs == 1:
iu.infer_exact(
tester,
pf + "_nobatch",
tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
model_version=model_version,
swap=swap,
outputs=outputs,
use_http=use_http,
use_grpc=use_grpc,
use_http_json_tensors=use_http_json_tensors,
skip_request_id_check=skip_request_id_check,
use_streaming=use_streaming,
correlation_id=correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that supports batching
iu.infer_exact(
tester,
pf, (bs,) + tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
model_version=model_version,
swap=swap,
outputs=outputs,
use_http=use_http,
use_grpc=use_grpc,
use_http_json_tensors=use_http_json_tensors,
skip_request_id_check=skip_request_id_check,
use_streaming=use_streaming,
correlation_id=correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
input_size = 16
all_ensemble_prefix = ["simple_", "sequence_", "fan_"]
ensemble_prefix = [""]
if ENSEMBLES and OS_WINDOWS:
for prefix in all_ensemble_prefix:
if tu.validate_for_ensemble_model(prefix, input_dtype,
output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
ensemble_prefix.append(prefix)
if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
for prefix in ensemble_prefix:
for pf in ["graphdef", "savedmodel"]:
if pf in BACKENDS:
_infer_exact_helper(self,
prefix + pf, (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if not CPU_ONLY and tu.validate_for_trt_model(
input_dtype, output0_dtype, output1_dtype, (input_size, 1, 1),
(input_size, 1, 1), (input_size, 1, 1)):
for prefix in ensemble_prefix:
if 'plan' in BACKENDS:
if input_dtype == np.int8:
_infer_exact_helper(self,
prefix + 'plan', (input_size, 1, 1),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
else:
_infer_exact_helper(self,
prefix + 'plan', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
(input_size,), (input_size,),
(input_size,)):
for prefix in ensemble_prefix:
if 'onnx' in BACKENDS:
_infer_exact_helper(self,
prefix + 'onnx', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_libtorch_model(input_dtype, output0_dtype,
output1_dtype, (input_size,),
(input_size,), (input_size,)):
for prefix in ensemble_prefix:
if 'libtorch' in BACKENDS:
_infer_exact_helper(self,
prefix + 'libtorch', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if prefix == "":
if 'python' in BACKENDS:
_infer_exact_helper(self,
prefix + 'python', (input_size,),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
def test_raw_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_hhh(self):
self._full_exact(np.float16,
np.float16,
np.float16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=True)
def test_raw_hff(self):
self._full_exact(np.float16,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_bii(self):
self._full_exact(np.int8,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ibb(self):
self._full_exact(np.int32,
np.int8,
np.int8,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ibs(self):
self._full_exact(np.int32,
np.int8,
np.int16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_fii(self):
self._full_exact(np.float32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ihs(self):
self._full_exact(np.int32,
np.float16,
np.int16,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ooo(self):
self._full_exact(np_dtype_string,
np_dtype_string,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_oii(self):
self._full_exact(np_dtype_string,
np.int32,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_oio(self):
self._full_exact(np_dtype_string,
np.int32,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ooi(self):
self._full_exact(np_dtype_string,
np_dtype_string,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ioo(self):
self._full_exact(np.int32,
np_dtype_string,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_iio(self):
self._full_exact(np.int32,
np.int32,
np_dtype_string,
output0_raw=True,
output1_raw=True,
swap=False)
def test_raw_ioi(self):
self._full_exact(np.int32,
np_dtype_string,
np.int32,
output0_raw=True,
output1_raw=True,
swap=False)
# shared memory does not support class output
if not (TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY):
def test_class_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=False,
output1_raw=False,
swap=False)
def test_class_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
swap=True)
def test_class_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
swap=False)
def test_mix_bbb(self):
self._full_exact(np.int8,
np.int8,
np.int8,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_sss(self):
self._full_exact(np.int16,
np.int16,
np.int16,
output0_raw=False,
output1_raw=True,
swap=True)
def test_mix_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_lll(self):
self._full_exact(np.int64,
np.int64,
np.int64,
output0_raw=False,
output1_raw=True,
swap=False)
def test_mix_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32,
output0_raw=True,
output1_raw=False,
swap=True)
def test_mix_iff(self):
self._full_exact(np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=True,
swap=False)
def test_raw_version_latest_1(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of graphdef_int8_int8_int8 but
# only version 3 should be available
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=1,
swap=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=2,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int8,
np.int8,
np.int8,
model_version=3,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_latest_2(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of graphdef_int16_int16_int16 but only
# versions 2 and 3 should be available
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=1,
swap=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=2,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int16,
np.int16,
np.int16,
model_version=3,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_all(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of *_int32_int32_int32 and all should
# be available.
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=1,
swap=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=2,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
model_version=3,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_raw_version_specific_1(self):
input_size = 16
tensor_shape = (1, input_size)
# There are 3 versions of *_float16_float16_float16 but only
# version 1 should be available.
for platform in ('graphdef', 'savedmodel'):
if platform not in BACKENDS:
continue
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=1,
swap=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=2,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.float16,
np.float16,
np.float16,
model_version=3,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
def test_raw_version_specific_1_3(self):
input_size = 16
# There are 3 versions of *_float32_float32_float32 but only
# versions 1 and 3 should be available.
for platform in ('graphdef', 'savedmodel', 'plan'):
if platform == 'plan' and CPU_ONLY:
continue
if platform not in BACKENDS:
continue
tensor_shape = (1, input_size)
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1,
swap=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
try:
iu.infer_exact(
self,
platform,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=2,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except InferenceServerException as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
iu.infer_exact(self,
platform,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=3,
swap=True,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if ENSEMBLES:
if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):
def test_ensemble_mix_platform(self):
# Skip on CPU only machine as TensorRT model is used in this ensemble
if CPU_ONLY:
return
for bs in (1, 8):
iu.infer_exact(
self,
"mix_platform", (bs, 16),
bs,
np.float32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if "graphdef" in BACKENDS:
def test_ensemble_mix_type(self):
for bs in (1, 8):
iu.infer_exact(
self,
"mix_type", (bs, 16),
bs,
np.int32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):
def test_ensemble_mix_ensemble(self):
for bs in (1, 8):
iu.infer_exact(
self,
"mix_ensemble", (bs, 16),
bs,
np.int32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if all(x in BACKENDS for x in ['graphdef',]) and not OS_WINDOWS:
def test_ensemble_mix_batch_nobatch(self):
base_names = ["batch_to_nobatch", "nobatch_to_batch"]
for name in base_names:
for bs in (1, 8):
iu.infer_exact(
self,
name, (bs, 16),
bs,
np.float32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_exact(
self,
name + "_nobatch", (8, 16),
1,
np.float32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# batch -> nobatch -> batch
for bs in (1, 8):
iu.infer_exact(
self,
"mix_nobatch_batch", (bs, 16),
bs,
np.float32,
np.float32,
np.float32,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if not (TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY):
def test_ensemble_label_lookup(self):
if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):
# Ensemble needs to look up label from the actual model
for bs in (1, 8):
iu.infer_exact(
self,
"mix_platform", (bs, 16),
bs,
np.float32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if all(x in BACKENDS for x in ['graphdef', 'savedmodel']):
# Label from the actual model will be passed along the nested ensemble
for bs in (1, 8):
iu.infer_exact(
self,
"mix_ensemble", (bs, 16),
bs,
np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if "graphdef" in BACKENDS:
# If label file is provided, it will use the provided label file directly
try:
iu.infer_exact(
self,
"wrong_label", (1, 16),
1,
np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
except AssertionError:
# Sanity check that infer_exact failed since this ensemble is provided
# with unexpected labels
pass
if "graphdef" in BACKENDS:
for bs in (1, 8):
iu.infer_exact(
self,
"label_override", (bs, 16),
bs,
np.int32,
np.float32,
np.float32,
output0_raw=False,
output1_raw=False,
use_http=USE_HTTP,
use_grpc=USE_GRPC,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"USE_HTTP",
"OS_WINDOWS",
"ENSEMBLES",
"TEST_CUDA_SHARED_MEMORY",
"TEST_SYSTEM_SHARED_MEMORY",
"TRITON_SERVER_CPU_ONLY",
"BACKENDS",
"USE_GRPC"
] |
[]
|
["USE_HTTP", "OS_WINDOWS", "ENSEMBLES", "TEST_CUDA_SHARED_MEMORY", "TEST_SYSTEM_SHARED_MEMORY", "TRITON_SERVER_CPU_ONLY", "BACKENDS", "USE_GRPC"]
|
python
| 8 | 0 | |
hackerank/2d-array/main.go
|
package main
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
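// prettyPrint dumps a matrix row by row; kept only for debugging (see the commented-out call in hourglassSum).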
func prettyPrint(arr [][]int32) {
for i := range arr {
for j := range arr[i] {
fmt.Printf("%v ", arr[i][j])
}
fmt.Println()
}
}
func max(a, b int32) int32 {
if a > b {
return a
}
return b
}
// Complete the hourglassSum function below.
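// It slides a 3x3 window over the 6x6 grid (16 positions), takes each window's hourglass sum via subSum, and returns the maximum.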
func hourglassSum(arr [][]int32) int32 {
maxSum := int32(math.MinInt32)
for i := 0; i < 4; i++ {
for j := 0; j < 4; j++ {
var newArr [][]int32
for k := 0; k < 3; k++ {
var arrRow []int32
for l := 0; l < 3; l++ {
arrRow = append(arrRow, arr[k+i][l+j])
}
newArr = append(newArr, arrRow)
}
//prettyPrint(newArr)
maxSum = max(subSum(newArr), maxSum)
//fmt.Println(maxSum)
}
}
return maxSum
}
func subSum(arr [][]int32) int32 {
var result int32 = 0
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
if (i == 1 && j == 0) || (i == 1 && j == 2) {
continue
}
result += arr[i][j]
}
}
return result
}
func main() {
reader := bufio.NewReaderSize(os.Stdin, 1024 * 1024)
//stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
//checkError(err)
//defer stdout.Close()
//writer := bufio.NewWriterSize(stdout, 1024 * 1024)
var arr [][]int32
for i := 0; i < 6; i++ {
arrRowTemp := strings.Split(readLine(reader), " ")
var arrRow []int32
for _, arrRowItem := range arrRowTemp {
arrItemTemp, err := strconv.ParseInt(arrRowItem, 10, 64)
checkError(err)
arrItem := int32(arrItemTemp)
arrRow = append(arrRow, arrItem)
}
		if len(arrRow) != 6 {
panic("Bad input")
}
arr = append(arr, arrRow)
}
result := hourglassSum(arr)
fmt.Print(result)
//fmt.Fprintf(writer, "%d\n", result)
//writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
go
| 1 | 0 | |
internal/server/api.go
|
/*
Copyright 2020 GramLabs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"net/http"
"os"
"strings"
"time"
"github.com/thestormforge/optimize-controller/v2/internal/version"
"github.com/thestormforge/optimize-go/pkg/api"
applications "github.com/thestormforge/optimize-go/pkg/api/applications/v2"
experimentsv1alpha1 "github.com/thestormforge/optimize-go/pkg/api/experiments/v1alpha1"
"github.com/thestormforge/optimize-go/pkg/config"
"golang.org/x/oauth2"
)
func NewExperimentAPI(ctx context.Context, uaComment string) (experimentsv1alpha1.API, error) {
client, err := newClientFromConfig(ctx, uaComment, func(srv config.Server) string {
return strings.TrimSuffix(srv.API.ExperimentsEndpoint, "/v1/experiments/")
})
if err != nil {
return nil, err
}
expAPI := experimentsv1alpha1.NewAPI(client)
// An unauthorized error means we will never be able to connect without changing the credentials and restarting
if _, err := expAPI.CheckEndpoint(ctx); api.IsUnauthorized(err) {
return nil, err
}
return expAPI, nil
}
func NewApplicationAPI(ctx context.Context, uaComment string) (applications.API, error) {
client, err := newClientFromConfig(ctx, uaComment, func(srv config.Server) string {
return strings.TrimSuffix(srv.API.ApplicationsEndpoint, "/v2/applications/")
})
if err != nil {
return nil, err
}
appAPI := applications.NewAPI(client)
if _, err := appAPI.CheckEndpoint(ctx); api.IsUnauthorized(err) {
return nil, err
}
return appAPI, nil
}
func newClientFromConfig(ctx context.Context, uaComment string, address func(config.Server) string) (api.Client, error) {
// Load the configuration
cfg := &config.OptimizeConfig{}
// TODO This is temporary while we migrate the audience value
if aud := os.Getenv("STORMFORGE_AUTHORIZATION_AUDIENCE"); aud != "" {
cfg.AuthorizationParameters = map[string][]string{
"audience": {aud},
}
}
if err := cfg.Load(); err != nil {
return nil, err
}
// Get the Experiments API endpoint from the configuration
// NOTE: The current version of the configuration has an explicit configuration for the
// experiments endpoint which would duplicate the "/experiments/" path segment
srv, err := config.CurrentServer(cfg.Reader())
if err != nil {
return nil, err
}
// Update the context to include an explicit OAuth2 client which includes a
// partial UA string (no comment for authorization requests) and a lower
	// timeout than what the SF client will use
ctx = context.WithValue(ctx, oauth2.HTTPClient, &http.Client{
Transport: version.UserAgent("optimize-pro", "", nil),
Timeout: 5 * time.Second,
})
rt, err := cfg.Authorize(ctx, version.UserAgent("optimize-pro", uaComment, nil))
if err != nil {
return nil, err
}
// Create a new API client
c, err := api.NewClient(address(srv), rt)
if err != nil {
return nil, err
}
return c, nil
}
|
[
"\"STORMFORGE_AUTHORIZATION_AUDIENCE\""
] |
[] |
[
"STORMFORGE_AUTHORIZATION_AUDIENCE"
] |
[]
|
["STORMFORGE_AUTHORIZATION_AUDIENCE"]
|
go
| 1 | 0 | |
Lib/test/test_posixpath.py
|
import unittest
from test import test_support
import posixpath, os
class PosixPathTest(unittest.TestCase):
def assertIs(self, a, b):
self.assert_(a is b)
def test_normcase(self):
# Check that normcase() is idempotent
p = "FoO/./BaR"
p = posixpath.normcase(p)
self.assertEqual(p, posixpath.normcase(p))
self.assertRaises(TypeError, posixpath.normcase)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/")
self.assertRaises(TypeError, posixpath.join)
def test_splitdrive(self):
self.assertEqual(posixpath.splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertRaises(TypeError, posixpath.splitdrive)
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertRaises(TypeError, posixpath.split)
def test_splitext(self):
self.assertEqual(posixpath.splitext("foo.ext"), ("foo", ".ext"))
self.assertEqual(posixpath.splitext("/foo/foo.ext"), ("/foo/foo", ".ext"))
self.assertEqual(posixpath.splitext(".ext"), ("", ".ext"))
self.assertEqual(posixpath.splitext("/foo.ext/foo"), ("/foo.ext/foo", ""))
self.assertEqual(posixpath.splitext("foo.ext/"), ("foo.ext/", ""))
self.assertEqual(posixpath.splitext(""), ("", ""))
self.assertEqual(posixpath.splitext("foo.bar.ext"), ("foo.bar", ".ext"))
self.assertRaises(TypeError, posixpath.splitext)
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertRaises(TypeError, posixpath.isabs)
def test_splitdrive(self):
self.assertEqual(posixpath.splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertRaises(TypeError, posixpath.splitdrive)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertRaises(TypeError, posixpath.basename)
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertRaises(TypeError, posixpath.dirname)
def test_commonprefix(self):
self.assertEqual(
posixpath.commonprefix([]),
""
)
self.assertEqual(
posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
"/home/swen"
)
self.assertEqual(
posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
"/home/swen/"
)
self.assertEqual(
posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"]),
"/home/swen/spam"
)
def test_getsize(self):
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertEqual(posixpath.getsize(test_support.TESTFN), 3)
finally:
if not f.closed:
f.close()
os.remove(test_support.TESTFN)
def test_time(self):
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
f = open(test_support.TESTFN, "ab")
f.write("bar")
f.close()
f = open(test_support.TESTFN, "rb")
d = f.read()
f.close()
self.assertEqual(d, "foobar")
self.assert_(
posixpath.getctime(test_support.TESTFN) <=
posixpath.getmtime(test_support.TESTFN)
)
finally:
if not f.closed:
f.close()
os.remove(test_support.TESTFN)
def test_islink(self):
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
os.remove(test_support.TESTFN + "1")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN + "1")
except os.error:
pass
try:
os.remove(test_support.TESTFN + "2")
except os.error:
pass
self.assertRaises(TypeError, posixpath.islink)
def test_exists(self):
self.assertIs(posixpath.exists(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.exists(test_support.TESTFN), True)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN)
except os.error:
pass
self.assertRaises(TypeError, posixpath.exists)
def test_isdir(self):
self.assertIs(posixpath.isdir(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.isdir(test_support.TESTFN), False)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(posixpath.isdir(test_support.TESTFN), True)
os.rmdir(test_support.TESTFN)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN)
except os.error:
pass
try:
os.rmdir(test_support.TESTFN)
except os.error:
pass
self.assertRaises(TypeError, posixpath.isdir)
def test_isfile(self):
self.assertIs(posixpath.isfile(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.isfile(test_support.TESTFN), True)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(posixpath.isfile(test_support.TESTFN), False)
os.rmdir(test_support.TESTFN)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN)
except os.error:
pass
try:
os.rmdir(test_support.TESTFN)
except os.error:
pass
self.assertRaises(TypeError, posixpath.isdir)
def test_samefile(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "1"
),
True
)
            # If we don't have links, assume that os.stat doesn't return reasonable
# inode information and thus, that samefile() doesn't work
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
)
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
False
)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN + "1")
except os.error:
pass
try:
os.remove(test_support.TESTFN + "2")
except os.error:
pass
self.assertRaises(TypeError, posixpath.samefile)
def test_samestat(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "1")
),
True
)
            # If we don't have links, assume that os.stat() doesn't return reasonable
            # inode information and thus, that samestat() doesn't work
if hasattr(os, "symlink"):
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
False
)
finally:
            if not f.closed:
f.close()
try:
os.remove(test_support.TESTFN + "1")
except os.error:
pass
try:
os.remove(test_support.TESTFN + "2")
except os.error:
pass
self.assertRaises(TypeError, posixpath.samestat)
def test_ismount(self):
if os.name in ('mac',):
return
self.assertIs(posixpath.ismount("/"), True)
self.assertRaises(TypeError, posixpath.ismount)
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
try:
import pwd
except ImportError:
pass
else:
self.assert_(isinstance(posixpath.expanduser("~/"), basestring))
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assert_(isinstance(posixpath.expanduser("~root/"), basestring))
self.assert_(isinstance(posixpath.expanduser("~foo/"), basestring))
self.assertRaises(TypeError, posixpath.expanduser)
def test_expandvars(self):
oldenv = os.environ.copy()
try:
os.environ.clear()
os.environ["foo"] = "bar"
os.environ["{foo"] = "baz1"
os.environ["{foo}"] = "baz2"
self.assertEqual(posixpath.expandvars("foo"), "foo")
self.assertEqual(posixpath.expandvars("$foo bar"), "bar bar")
self.assertEqual(posixpath.expandvars("${foo}bar"), "barbar")
self.assertEqual(posixpath.expandvars("$[foo]bar"), "$[foo]bar")
self.assertEqual(posixpath.expandvars("$bar bar"), "$bar bar")
self.assertEqual(posixpath.expandvars("$?bar"), "$?bar")
self.assertEqual(posixpath.expandvars("${foo}bar"), "barbar")
self.assertEqual(posixpath.expandvars("$foo}bar"), "bar}bar")
self.assertEqual(posixpath.expandvars("${foo"), "${foo")
self.assertEqual(posixpath.expandvars("${{foo}}"), "baz1}")
finally:
os.environ.clear()
os.environ.update(oldenv)
self.assertRaises(TypeError, posixpath.expandvars)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertRaises(TypeError, posixpath.normpath)
def test_abspath(self):
self.assert_("foo" in posixpath.abspath("foo"))
self.assertRaises(TypeError, posixpath.abspath)
def test_realpath(self):
self.assert_("foo" in posixpath.realpath("foo"))
self.assertRaises(TypeError, posixpath.realpath)
def test_main():
test_support.run_unittest(PosixPathTest)
if __name__=="__main__":
test_main()
|
[] |
[] |
[
"{foo",
"foo",
"{foo}"
] |
[]
|
["{foo", "foo", "{foo}"]
|
python
| 3 | 0 | |
pkg/types/types.go
|
package types
import (
"os"
"strings"
)
const (
// DefaultFeature filter is for the features which are mandatory
DefaultFeature = "default"
// GitHubFeature filter enables secrets created with the scm_github filter
GitHubFeature = "scm_github"
// GitLabFeature filter is the feature which enables secret creation for GitLab
GitLabFeature = "scm_gitlab"
// Auth filter enables OAuth secret creation
Auth = "auth"
// GCPDNS filter enables the creation of secrets for Google Cloud Platform DNS when TLS is enabled
GCPDNS = "gcp_dns01"
// DODNS filter enables the creation of secrets for Digital Ocean DNS when TLS is enabled
DODNS = "do_dns01"
// Route53DNS filter enables the creation of secrets for Amazon Route53 DNS when TLS is enabled
Route53DNS = "route53_dns01"
// CloudDNS is the dns_service field in yaml file for Google Cloud Platform
CloudDNS = "clouddns"
// DigitalOcean is the dns_service field in yaml file for Digital Ocean
DigitalOcean = "digitalocean"
// Route53 is the dns_service field in yaml file for Amazon
Route53 = "route53"
// GitLabSCM repository manager name as displayed in the init.yaml file
GitLabSCM = "gitlab"
// GitHubSCM repository manager name as displayed in the init.yaml file
GitHubSCM = "github"
// ECRFeature enable ECR
ECRFeature = "ecr"
)
type Plan struct {
Features []string `yaml:"features"`
Orchestration string `yaml:"orchestration"`
Secrets []KeyValueNamespaceTuple `yaml:"secrets"`
RootDomain string `yaml:"root_domain"`
Registry string `yaml:"registry"`
CustomersURL string `yaml:"customers_url"`
SCM string `yaml:"scm"`
Github Github `yaml:"github"`
Gitlab Gitlab `yaml:"gitlab"`
TLS bool `yaml:"tls"`
OAuth OAuth `yaml:"oauth"`
S3 S3 `yaml:"s3"`
EnableOAuth bool `yaml:"enable_oauth"`
TLSConfig TLSConfig `yaml:"tls_config"`
Slack Slack `yaml:"slack"`
Ingress string `yaml:"ingress"`
Deployment Deployment `yaml:"deployment"`
EnableDockerfileLang bool `yaml:"enable_dockerfile_lang"`
ScaleToZero bool `yaml:"scale_to_zero"`
OpenFaaSCloudVersion string `yaml:"openfaas_cloud_version"`
NetworkPolicies bool `yaml:"network_policies"`
BuildBranch string `yaml:"build_branch"`
EnableECR bool `yaml:"enable_ecr"`
}
// Deployment is the deployment section of YAML concerning
// functions as deployed
type Deployment struct {
CustomTemplate []string `yaml:"custom_templates"`
}
// FormatCustomTemplates returns the custom templates as a comma-separated string with a space after each comma
func (d Deployment) FormatCustomTemplates() string {
val := ""
for _, templateURL := range d.CustomTemplate {
val = val + templateURL + ", "
}
return strings.TrimRight(val, " ,")
}
type KeyValueTuple struct {
Name string `yaml:"name"`
Value string `yaml:"value"`
}
type FileSecret struct {
Name string `yaml:"name"`
ValueFrom string `yaml:"value_from"`
// ValueCommand is a command to execute to generate
// a secret file specified in ValueFrom
ValueCommand string `yaml:"value_command"`
}
// ExpandValueFrom expands ~ to the home directory of the current user
// kept in the HOME env-var.
func (fs FileSecret) ExpandValueFrom() string {
value := fs.ValueFrom
value = strings.Replace(value, "~", os.Getenv("HOME"), -1)
return value
}
type KeyValueNamespaceTuple struct {
Name string `yaml:"name"`
Literals []KeyValueTuple `yaml:"literals"`
Namespace string `yaml:"namespace"`
Files []FileSecret `yaml:"files"`
Type string `yaml:"type"`
Filters []string `yaml:"filters"`
}
type Github struct {
AppID string `yaml:"app_id"`
PrivateKeyFile string `yaml:"private_key_filename"`
}
type Gitlab struct {
GitLabInstance string `yaml:"gitlab_instance"`
}
type Slack struct {
URL string `yaml:"url"`
}
type OAuth struct {
ClientId string `yaml:"client_id"`
OAuthProviderBaseURL string `yaml:"oauth_provider_base_url"`
}
type S3 struct {
Url string `yaml:"s3_url"`
Region string `yaml:"s3_region"`
TLS bool `yaml:"s3_tls"`
Bucket string `yaml:"s3_bucket"`
}
type TLSConfig struct {
Email string `yaml:"email"`
DNSService string `yaml:"dns_service"`
ProjectID string `yaml:"project_id"`
IssuerType string `yaml:"issuer_type"`
Region string `yaml:"region"`
AccessKeyID string `yaml:"access_key_id"`
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
adamspy/adamspy.py
|
"""Functions for manipulating adams files and data
"""
import os
import re
import subprocess
import platform
XMT_PATTERN = re.compile('\\s*file_name\\s*=\\s*"?.+\\.xmt_txt"?\\s*')
def get_simdur_from_msg(msg_file):
"""Reads an Adams message file (.msg) and returns the total duration of the simulation.
Parameters
----------
msg_file : str
Filepath to an Adams message file (.msg)
Raises
------
RuntimeError
Returned if no simulation end time was found in the specified message file
Returns
-------
float
Total duration of the simulation
"""
found = False
with open(msg_file, 'r') as fid:
for line in fid:
            if re.match(' *command: sim(ulate)?/dyn(amic)?.*, *end *=.*', line.lower()):
duration = float(re.split('end *=',line.lower().replace(' ',''))[-1].split(',')[0])
found=True
# Raise an error if no duration found
if not found:
raise RuntimeError('No simulation end time was found in the specified message file!')
return duration
def get_simdur_from_acf(acf_file):
"""Reads an Adams command file (.acf) and returns the total duration of the simulation.
Parameters
----------
acf_file : str
        Filepath to an Adams command file (.acf)
Raises
------
RuntimeError
        Returned if no simulation end time was found in the specified acf file
Returns
-------
float
Total duration of the simulation
"""
found = False
with open(acf_file, 'r') as fid:
for line in fid:
            if re.match('sim(ulate)?/dyn(amic)?.*, *end *=.*', line.lower()):
duration = float(re.split('end *=',line.lower().replace(' ',''))[-1].split(',')[0])
found=True
# Raise an error if no duration found
if not found:
raise RuntimeError('No simulation end time was found in the specified acf file!')
return duration
def modify_xmt_path(cmd_file, new_xmt_path):
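    """Points the file_name entry of an Adams View command (.cmd) file at `new_xmt_path`.
    The .cmd file is rewritten in place through a temporary file and the original xmt path is returned.
    """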
with open(cmd_file, 'r') as fid_in, open(cmd_file.replace('.cmd', '.tmp'), 'w') as fid_out:
for line in fid_in:
if XMT_PATTERN.match(line) is None:
fid_out.write(line)
else:
line_parts = line.split('"')
fid_out.write('"'.join([line_parts[0], new_xmt_path, line_parts[-1]]))
original_path = line_parts[1]
os.remove(cmd_file)
os.rename(cmd_file.replace('.cmd', '.tmp'), cmd_file)
return original_path
def get_n_threads(adm_file):
"""Searches `adm_file` for the NTHREADS statement and returns its value.
Parameters
----------
adm_file : str
Path to an Adams Dataset (.adm) file
Returns
-------
int
Number of threads set `adm_file`
"""
n_threads = 1
with open(adm_file, 'r') as fid:
for line in fid:
# If at the NTHREADS statement, rewrite it
if re.match('^,[ \\t]*nthreads[ \\t]*=[ \\t]*\\d$', line, flags=re.I):
n_threads = int(line.split('=')[1].strip())
return n_threads
def set_n_threads(adm_file, n_threads):
"""Changes or creates the NTHREADS option on the PREFERENCES statement in `adm_file`.
Parameters
----------
adm_file : str
File path to an Adams Dataset (.adm) file
n_threads : int
Number of threads to use when running the model specified in `adm_file`
"""
found = False
with open(adm_file, 'r') as fid_old, open(adm_file + '.tmp', 'w') as fid_new:
for line in fid_old:
# If at the NTHREADS statement, rewrite it
if re.match('^,[ \\t]*nthreads[ \\t]*=[ \\t]*\\d$', line, flags=re.I):
fid_new.write(f', NTHREADS = {n_threads}\n')
found = True
# If the end is reached and the NTHREADS statement isn't found, create it
elif re.match('^end[ \\t]*$', line, re.I) and not found:
fid_new.write(f'PREFERENCES/\n, NTHREADS = {n_threads}\n!\n')
fid_new.write(line)
# If at a normal line, write it
else:
fid_new.write(line)
# Delete the old adm file and replace with modified
os.remove(adm_file)
os.rename(adm_file + '.tmp', adm_file)
def solve(acf_file, wait=False, use_adams_car=False):
"""Runs Adams Solver to solve the model specified in `acf_file`
Parameters
----------
acf_file : str
Path to an Adams Command (.acf) File
"""
file = os.path.split(acf_file)[-1]
cwd = os.path.dirname(acf_file) if os.path.dirname(acf_file) != '' else os.getcwd()
if platform.system() == 'Windows':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
if use_adams_car is False:
command = '"{}" ru-s "{}"'.format(os.environ['ADAMS_LAUNCH_COMMAND'], file)
else:
command = '"{}" acar ru-solver "{}"'.format(os.environ['ADAMS_LAUNCH_COMMAND'], file)
proc = subprocess.Popen(command, cwd=cwd, startupinfo=startupinfo)
else:
if use_adams_car is False:
command = [os.environ['ADAMS_LAUNCH_COMMAND'], '-c', 'ru-standard', 'i', file, 'exit']
else:
command = [os.environ['ADAMS_LAUNCH_COMMAND'], '-c', 'acar', 'ru-solver', 'i', file, 'exit']
proc = subprocess.Popen(command, cwd=cwd)
if wait:
proc.wait()
return proc
class AdmFileError(Exception):
pass
|
[] |
[] |
[
"ADAMS_LAUNCH_COMMAND"
] |
[]
|
["ADAMS_LAUNCH_COMMAND"]
|
python
| 1 | 0 | |
tests/test_create_group.py
|
import eodslib
import os
from pathlib import Path
import pytest
@pytest.mark.skip_real()
def test_create(set_output_dir, modify_id_list, unique_run_string):
output_dir = set_output_dir
conn = {
'domain': os.getenv("HOST"),
'username': os.getenv("API_USER"),
'access_token': os.getenv("API_TOKEN"),
}
eods_params = {
'output_dir':output_dir,
'title':'keep_api_test_create_group',
'verify': False,
#'limit':1,
}
list_of_layers, _ = eodslib.query_catalog(conn, **eods_params)
os.rename(output_dir / 'eods-query-all-results.csv', output_dir / 'eods-query-all-results-create-group-test.csv')
errors = []
response_json = eodslib.create_layer_group(
conn,
list_of_layers,
'eodslib-create-layer-test-' + unique_run_string,
abstract='some description of the layer group ' + unique_run_string
)
if not modify_id_list:
modify_id_list.append(response_json['id'])
lower_unique_run_string = unique_run_string.lower().replace('-', '_').replace('/', '').replace(':', '')
# content checks
if len(response_json['layers']) != 1:
errors.append(f"Content Error: \'layers\' in response text should contain only 1 layers, got {len(response_json['layers'])} layers")
if response_json['layers'][0] != 'geonode:keep_api_test_create_group':
errors.append(f"Content Error: 1st layer of \'layers\' in response text should be \'geonode:keep_api_test_create_group\', it was \'{response_json['layers'][0]}\'")
if response_json['abstract'] != 'some description of the layer group ' + unique_run_string:
errors.append(f"Content Error: \'abstract\' in response text should be \'some description of the layer group {unique_run_string}\', it was \'{response_json['abstract']}\'")
if response_json['alternate'] != 'geonode:eodslib_create_layer_test_' + lower_unique_run_string:
errors.append(f"Content Error: \'alternate\' in response text should be \'geonode:eodslib_create_layer_test_{lower_unique_run_string}\', it was \'{response_json['alternate']}\'")
if response_json['name'] != 'eodslib_create_layer_test_' + lower_unique_run_string:
errors.append(f"Content Error: \'name\' in response text should be \'eodslib_create_layer_test_{lower_unique_run_string}\', it was \'{response_json['name']}\'")
if response_json['title'] != 'eodslib-create-layer-test-' + unique_run_string:
errors.append(f"Content Error: \'title\' in response text should be \'eodslib-create-layer-test-{unique_run_string}\', it was \'{response_json['title']}\'")
assert not errors
|
[] |
[] |
[
"HOST",
"API_USER",
"API_TOKEN"
] |
[]
|
["HOST", "API_USER", "API_TOKEN"]
|
python
| 3 | 0 | |
testserver.py
|
#!/usr/bin/env python
import os, sys
if __name__ == '__main__':
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.project.settings'
from django.core.management import execute_from_command_line
execute_from_command_line(['./testserver.py', 'runserver'])
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
Integrations/AzureSecurityCenter/AzureSecurityCenter.py
|
from CommonServerPython import *
""" IMPORTS """
import requests
import ast
from datetime import datetime
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
# remove proxy if not set to true in params
if not demisto.params().get("proxy"):
del os.environ["HTTP_PROXY"]
del os.environ["HTTPS_PROXY"]
del os.environ["http_proxy"]
del os.environ["https_proxy"]
""" GLOBAL VARS """
CONTEXT = demisto.getIntegrationContext()
USE_SSL = not demisto.params().get("unsecure", False)
DEMISTOBOT = "https://demistobot.demisto.com/azuresc-token"
SUBSCRIPTION_ID = CONTEXT.get("subscription_id")
SUBSCRIPTION_URL = "/subscriptions/{}".format(SUBSCRIPTION_ID)
TOKEN = demisto.params().get("token")
TENANT_ID = demisto.params().get("tenant_id")
BASE_URL = demisto.params().get("server_url")
RESOURCE = "https://management.azure.com/"
AUTH_GRANT_TYPE = "client_credentials"
# API Versions
ALERT_API_VERSION = "2015-06-01-preview"
LOCATION_API_VERSION = "2015-06-01-preview"
ATP_API_VERSION = "2017-08-01-preview"
APS_API_VERSION = "2017-08-01-preview"
IPP_API_VERSION = "2017-08-01-preview"
JIT_API_VERSION = "2015-06-01-preview"
STORAGE_API_VERSION = "2018-07-01"
""" HELPER FUNCTIONS """
def set_subscription_id():
"""
Setting subscription ID to the context and returning it
"""
headers = {"Authorization": TOKEN, "Accept": "application/json"}
params = {"tenant": TENANT_ID, "product": "AzureSecurityCenter"}
r = requests.get(DEMISTOBOT, headers=headers, params=params, verify=USE_SSL)
try:
data = r.json()
if r.status_code != requests.codes.ok:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
sub_id = data.get("subscription_id")
demisto.setIntegrationContext(
{
"token": data.get("token"),
"stored": epoch_seconds(),
"subscription_id": sub_id,
}
)
return sub_id
except ValueError:
return_error("There was problem with your request: {}".format(r.content))
def epoch_seconds(d=None):
"""
Return the number of seconds for given date. If no date, return current.
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def get_token():
"""
Check if we have a valid token and if not get one
"""
token = CONTEXT.get("token")
stored = CONTEXT.get("stored")
if token and stored:
if epoch_seconds() - stored < 60 * 60 - 30:
return token
headers = {"Authorization": TOKEN, "Accept": "application/json"}
r = requests.get(
DEMISTOBOT,
headers=headers,
params={"tenant": TENANT_ID, "product": "AzureSecurityCenter"},
verify=USE_SSL,
)
data = r.json()
if r.status_code != requests.codes.ok:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
demisto.setIntegrationContext(
{
"token": data.get("token"),
"stored": epoch_seconds(),
"subscription_id": data.get("subscription_id"),
}
)
return data.get("token")
def http_request(method, url_suffix, body=None, params=None, add_subscription=True):
"""
    Generic request to the Azure management API
"""
token = get_token()
headers = {
"Authorization": "Bearer " + token,
"Content-Type": "application/json",
"Accept": "application/json",
}
if add_subscription:
url = BASE_URL + SUBSCRIPTION_URL + url_suffix
else:
url = BASE_URL + url_suffix
r = requests.request(method, url, json=body, params=params, headers=headers)
if r.status_code not in {200, 201, 202, 204}:
return_error(
"Error in API call to Azure Security Center [{}] - {}".format(
r.status_code, r.text
)
)
try:
r = r.json()
return r
except ValueError:
return dict()
# Format ports in JIT access policy rule to (portNum, protocol, allowedAddress, maxDuration)
def format_jit_port_rule(ports):
port_array = list()
for port in ports:
        # each item is unicode; convert to str for an ASCII representation
p_num = str(port.get("number"))
p_src_addr = (
str(port.get("allowedSourceAddressPrefix"))
if port.get("allowedSourceAddressPrefix") != "*"
else "any"
)
p_protocol = str(port.get("protocol")) if port.get("protocol") != "*" else "any"
p_max_duration = str(port.get("maxRequestAccessDuration"))
port_array.append(str((p_num, p_protocol, p_src_addr, p_max_duration)))
return ", ".join(port_array)
# Format ports in JIT access request to (portNum, allowedAddress, endTime, status)
def format_jit_port_request(ports):
port_array = list()
for port in ports:
        # each item is unicode; convert to str for an ASCII representation
p_num = str(port.get("number"))
p_src_addr = (
str(port.get("allowedSourceAddressPrefix"))
if port.get("allowedSourceAddressPrefix") != "*"
else "any"
)
p_status = str(port.get("status"))
p_end_time = str(port.get("endTimeUtc"))
port_array.append(str((p_num, p_src_addr, p_end_time, p_status)))
return ", ".join(port_array)
def normalize_context_key(string):
"""Normalize context keys
    Function will normalize the string (remove white spaces and trailing characters)
Args:
string (str):
Returns:
Normalized string
"""
tmp = string[:1].upper() + string[1:]
return tmp.replace(" ", "")
""" FUNCTIONS """
""" Alert Start """
def get_alert_command(args):
"""Getting specified alert from API
Args
args (dict): dictionary containing commands args
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
alert_id = args.get("alert_id")
alert = get_alert(resource_group_name, asc_location, alert_id)
final_output = list()
# Basic Property Table
properties = alert.get("properties")
if properties:
basic_table_output = [
{
"DisplayName": properties.get("alertDisplayName"),
"CompromisedEntity": properties.get("compromisedEntity"),
"Description": properties.get("description"),
"DetectedTime": properties.get("detectedTimeUtc"),
"ReportedTime": properties.get("reportedTimeUtc"),
"ReportedSeverity": properties.get("reportedSeverity"),
"ConfidenceScore": properties.get("confidenceScore", "None"),
"State": properties.get("state"),
"ActionTaken": properties.get("actionTaken"),
"CanBeInvestigated": properties.get("canBeInvestigated"),
"RemediationSteps": properties.get("remediationSteps"),
"VendorName": properties.get("vendorName"),
"AssociatedResource": properties.get("associatedResource"),
"AlertName": properties.get("alertName"),
"InstanceID": properties.get("instanceId", "None"),
"ID": alert.get("name"),
"ExtendedProperties": properties.get("extendedProperties"),
"Entities": properties.get("entities"),
"SubscriptionID": properties.get("subscriptionId"),
}
]
md = tableToMarkdown(
"Azure Security Center - Get Alert - Basic Property",
basic_table_output,
[
"DisplayName",
"CompromisedEntity",
"Description",
"DetectedTime",
"ReportedTime",
"ReportedSeverity",
"ConfidenceScore",
"State",
"ActionTaken",
"CanBeInvestigated",
"RemediationSteps",
"VendorName",
"AssociatedResource",
"AlertName",
"InstanceID",
"ID",
],
removeNull=True,
)
ec = {
"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": basic_table_output
}
basic_table_entry = {
"Type": entryTypes["note"],
"Contents": alert,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
final_output.append(basic_table_entry)
# Extended Properties Table
if (
alert.get("properties")
and alert.get("properties")
and alert.get("properties").get("extendedProperties")
):
extended_properties = dict()
properties = alert.get("properties")
if isinstance(properties.get("extendedProperties"), dict):
for key, value in alert["properties"]["extendedProperties"].items():
extended_properties[normalize_context_key(key)] = value
extended_table_entry = {
"Type": entryTypes["note"],
"Contents": alert["properties"]["extendedProperties"],
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(
"Azure Security Center - Get Alert - Extended Property",
extended_properties,
removeNull=True,
),
}
final_output.append(extended_table_entry)
# Entities Table
entities = properties.get("entities")
if entities:
if isinstance(entities, dict):
entities_table_output = list()
for entity in entities:
entities_table_output.append(
{
"Content": ast.literal_eval(str(entity)),
"Type": entity["type"],
}
)
md = tableToMarkdown(
"Azure Security Center - Get Alert - Entity",
entities_table_output,
removeNull=True,
)
entities_table_entry = {
"Type": entryTypes["note"],
"Contents": alert.get("properties").get("entities"),
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
}
final_output.append(entities_table_entry)
demisto.results(final_output)
def get_alert(resource_group_name, asc_location, alert_id):
"""Building query
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
alert_id (str): Alert ID
Returns:
response body (dict)
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}".format(resource_group_name)
cmd_url += "/providers/Microsoft.Security/locations/{}/alerts/{}?api-version={}".format(
asc_location, alert_id, ALERT_API_VERSION
)
response = http_request("GET", cmd_url)
return response
def list_alerts_command(args):
"""Getting all alerts
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
filter_query = args.get("filter")
select_query = args.get("select")
expand_query = args.get("expand")
alerts = list_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
).get("value")
outputs = list()
for alert in alerts:
properties = alert.get("properties")
if properties:
outputs.append(
{
"DisplayName": properties.get("alertDisplayName"),
"CompromisedEntity": properties.get("compromisedEntity"),
"DetectedTime": properties.get("detectedTimeUtc"),
"ReportedSeverity": properties.get("reportedSeverity"),
"State": properties.get("state"),
"ActionTaken": properties.get("actionTaken"),
"Description": properties.get("description"),
"ID": alert.get("name"),
}
)
md = tableToMarkdown(
"Azure Security Center - List Alerts",
outputs,
[
"DisplayName",
"CompromisedEntity",
"DetectedTime",
"ReportedSeverity",
"State",
"ActionTaken",
"Description",
"ID",
],
removeNull=True,
)
ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
entry = {
"Type": entryTypes["note"],
"Contents": alerts,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def get_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
):
"""Building query
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
filter_query (str): what to filter
select_query (str): what to select
expand_query (str): what to expand
Returns:
dict: contains response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}/providers/Microsoft.Security".format(
resource_group_name
)
        # ascLocation must be used when specifying resourceGroupName
if asc_location:
cmd_url += "/locations/{}".format(asc_location)
else:
cmd_url += "/providers/Microsoft.Security"
cmd_url += "/alerts?api-version={}".format(ALERT_API_VERSION)
if filter_query:
cmd_url += "&$filter={}".format(filter_query)
if select_query:
cmd_url += "&$select={}".format(select_query)
if expand_query:
cmd_url += "&$expand={}".format(expand_query)
response = http_request("GET", cmd_url)
return response
def list_alerts(
resource_group_name, asc_location, filter_query, select_query, expand_query
):
"""Listing alerts
Args:
resource_group_name (str): ResourceGroupName
asc_location (str): Azure Security Center location
filter_query (str): what to filter
select_query (str): what to select
expand_query (str): what to expand
Returns:
dict: contains response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}/providers/Microsoft.Security".format(
resource_group_name
)
        # ascLocation must be used when specifying resourceGroupName
if asc_location:
cmd_url += "/locations/{}".format(asc_location)
else:
cmd_url += "/providers/Microsoft.Security"
cmd_url += "/alerts?api-version={}".format(ALERT_API_VERSION)
if filter_query:
cmd_url += "&$filter={}".format(filter_query)
if select_query:
cmd_url += "&$select={}".format(select_query)
if expand_query:
cmd_url += "&$expand={}".format(expand_query)
response = http_request("GET", cmd_url)
return response
def update_alert_command(args):
"""Update given alert
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
alert_id = args.get("alert_id")
alert_update_action_type = args.get("alert_update_action_type")
response = update_alert(
resource_group_name, asc_location, alert_id, alert_update_action_type
)
outputs = {"ID": response.get("id"), "ActionTaken": alert_update_action_type}
ec = {"AzureSecurityCenter.Alert(val.ID && val.ID === obj.ID)": outputs}
demisto.results(
{
"Type": entryTypes["note"],
"Contents": "Alert - {} has been set to {}.".format(
alert_id, alert_update_action_type
),
"ContentsFormat": formats["text"],
"EntryContext": ec,
}
)
def update_alert(resource_group_name, asc_location, alert_id, alert_update_action_type):
"""Building query
Args:
        resource_group_name (str): Resource Group Name
asc_location (str): Azure Security Center Location
alert_id (str): Alert ID
alert_update_action_type (str): What update type need to update
Returns:
dict: response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}".format(resource_group_name)
cmd_url += "/providers/Microsoft.Security/locations/{}/alerts/{}/{}?api-version={}".format(
asc_location, alert_id, alert_update_action_type, ALERT_API_VERSION
)
return http_request("POST", cmd_url)
""" Alert End """
""" Location Start """
def list_locations_command():
"""Getting all locations
"""
locations = list_locations().get("value")
outputs = list()
if locations:
for location in locations:
if location.get("properties") and location.get("properties").get(
"homeRegionName"
):
home_region_name = location.get("properties").get("homeRegionName")
else:
home_region_name = None
outputs.append(
{
"HomeRegionName": home_region_name,
"Name": location.get("name"),
"ID": location.get("id"),
}
)
md = tableToMarkdown(
"Azure Security Center - List Locations",
outputs,
["HomeRegionName", "Name", "ID"],
removeNull=True,
)
ec = {"AzureSecurityCenter.Location(val.ID && val.ID === obj.ID)": outputs}
entry = {
"Type": entryTypes["note"],
"Contents": locations,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
else:
demisto.results("No locations found")
def list_locations():
"""Building query
Returns:
dict: response body
"""
cmd_url = "/providers/Microsoft.Security/locations?api-version={}".format(
LOCATION_API_VERSION
)
response = http_request("GET", cmd_url)
return response
""" Location End """
""" Advanced Threat Protection Start """
def update_atp_command(args):
"""Updating given Advanced Threat Protection (enable/disable)
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
setting_name = args.get("setting_name")
is_enabled = args.get("is_enabled")
storage_account = args.get("storage_account")
response = update_atp(
resource_group_name, storage_account, setting_name, is_enabled
)
    outputs = {
        "ID": response.get("id"),
        "Name": response.get("name"),
        "IsEnabled": response.get("properties").get("isEnabled"),
    }
md = tableToMarkdown(
"Azure Security Center - Update Advanced Threat Detection Setting",
outputs,
["ID", "Name", "IsEnabled"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.AdvancedThreatProtection(val.ID && val.ID === obj.ID)": outputs
}
demisto.results(
{
"Type": entryTypes["note"],
"Contents": response,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
)
def update_atp(resource_group_name, storage_account, setting_name, is_enabled):
"""Building query
Args:
resource_group_name (str): Resource Group Name
        storage_account (str): Storage Account
setting_name (str): Setting Name
is_enabled (str): true/false
Returns:
        dict: response body
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}"
"/providers/Microsoft.Security/advancedThreatProtectionSettings/{}?api-version={}".format(
resource_group_name, storage_account, setting_name, ATP_API_VERSION
)
)
data = {
"id": "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage"
"/storageAccounts/{}/providers/Microsoft.Security/advancedThreatProtectionSettings/{}".format(
SUBSCRIPTION_ID, resource_group_name, storage_account, setting_name
        ),
        "name": setting_name,
        "type": "Microsoft.Security/advancedThreatProtectionSettings",
        "properties": {"isEnabled": is_enabled},
}
response = http_request("PUT", cmd_url, body=data)
return response
def get_atp_command(args):
"""Get given Advanced Threat Protection settings
Args:
args (dict): usually demisto.args()
"""
resource_group_name = args.get("resource_group_name")
setting_name = args.get("setting_name")
storage_account = args.get("storage_account")
response = get_atp(resource_group_name, storage_account, setting_name)
outputs = {
"ID": response.get("id"),
"Name": response.get("name"),
"IsEnabled": response["properties"]["isEnabled"]
if response.get("properties") and response.get("properties").get("isEnabled")
else None,
}
md = tableToMarkdown(
"Azure Security Center - Get Advanced Threat Detection Setting",
outputs,
["ID", "Name", "IsEnabled"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.AdvancedThreatProtection(val.ID && val.ID === obj.ID)": outputs
}
demisto.results(
{
"Type": entryTypes["note"],
"Contents": response,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
)
def get_atp(resource_group_name, storage_account, setting_name):
"""Building query
Args:
resource_group_name (str): Resource Group Name
        storage_account (str): Storage Account
setting_name (str): Setting Name
Returns:
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts"
"/{}/providers/Microsoft.Security/advancedThreatProtectionSettings/{}?api-version={}".format(
resource_group_name, storage_account, setting_name, ATP_API_VERSION
)
)
response = http_request("GET", cmd_url)
return response
""" Advanced Threat Protection End """
""" Auto Provisioning Settings Start """
def update_aps_command(args):
    """Updating Auto Provisioning Setting
Args:
args (dict): usually demisto.args()
"""
setting_name = args.get("setting_name")
auto_provision = args.get("auto_provision")
setting = update_aps(setting_name, auto_provision)
outputs = [
        {
            "Name": setting.get("name"),
            "AutoProvision": setting["properties"]["autoProvision"]
            if setting.get("properties")
            and setting.get("properties").get("autoProvision")
            else None,
"ID": setting.get("id"),
}
]
md = tableToMarkdown(
"Azure Security Center - Update Auto Provisioning Setting",
outputs,
["Name", "AutoProvision", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
}
entry = {
"Type": entryTypes["note"],
"Contents": setting,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def update_aps(setting_name, auto_provision):
"""Building query
Args:
setting_name (str): Setting name
auto_provision (str): Auto provision setting (On/Off)
Returns:
dict: response body
"""
cmd_url = "/providers/Microsoft.Security/autoProvisioningSettings/{}?api-version={}".format(
setting_name, APS_API_VERSION
)
    data = {"properties": {"autoProvision": auto_provision}}
response = http_request("PUT", cmd_url, body=data)
return response
def list_aps_command():
    """List all Auto Provisioning Settings
"""
settings = list_aps().get("value")
outputs = []
for setting in settings:
outputs.append(
{
"Name": setting.get("name"),
"AutoProvision": setting.get("properties").get("autoProvision")
if setting.get("properties")
and setting.get("properties").get("autoProvision")
else None,
"ID": setting.get("id"),
}
)
md = tableToMarkdown(
"Azure Security Center - List Auto Provisioning Settings",
outputs,
["Name", "AutoProvision", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
}
entry = {
"Type": entryTypes["note"],
"Contents": settings,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def list_aps():
"""Build query
Returns:
dict: response body
"""
cmd_url = "/providers/Microsoft.Security/autoProvisioningSettings?api-version={}".format(
APS_API_VERSION
)
response = http_request("GET", cmd_url)
return response
def get_aps_command(args):
    """Get given Auto Provisioning Setting
Args:
args (dict): usually demisto.args()
"""
setting_name = args.get("setting_name")
setting = get_aps(setting_name)
outputs = [
{
"Name": setting.get("name"),
"AutoProvision": setting.get("properties").get("autoProvision")
if setting.get("properties")
and setting.get("properties").get("autoProvision")
else None,
"ID": setting["id"],
}
]
md = tableToMarkdown(
"Azure Security Center - Get Auto Provisioning Setting",
outputs,
["Name", "AutoProvision", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.AutoProvisioningSetting(val.ID && val.ID === obj.ID)": outputs
}
entry = {
"Type": entryTypes["note"],
"Contents": setting,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def get_aps(setting_name):
"""Build query
Args:
setting_name: Setting name
Returns:
dict: response body
"""
cmd_url = "/providers/Microsoft.Security/autoProvisioningSettings/{}?api-version={}".format(
setting_name, APS_API_VERSION
)
response = http_request("GET", cmd_url)
return response
""" Auto Provisioning Settings End """
""" Information Protection Policies Start """
def list_ipp_command(args):
    """Listing all Information Protection Policies
Args:
args (dict): usually demisto.args()
"""
management_group = args.get("management_group")
policies = list_ipp(management_group).get("value")
outputs = list()
if policies:
for policy in policies:
if policy.get("properties") and policy.get("properties").get("labels"):
label_names = ", ".join(
[
label.get("displayName")
for label in policy["properties"]["labels"].values()
]
)
information_type_names = ", ".join(
[
it["displayName"]
for it in policy["properties"]["informationTypes"].values()
]
)
else:
label_names, information_type_names = None, None
outputs.append(
{
"Name": policy.get("name"),
"Labels": label_names,
"InformationTypeNames": information_type_names,
"InformationTypes": policy.get("properties").get("informationTypes")
if policy.get("properties")
and policy.get("properties").get("informationTypes")
else None,
"ID": policy["id"],
}
)
md = tableToMarkdown(
"Azure Security Center - List Information Protection Policies",
outputs,
["Name", "Labels", "InformationTypeNames", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.InformationProtectionPolicy(val.ID && val.ID === obj.ID)": outputs
}
entry = {
"Type": entryTypes["note"],
"Contents": policies,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
else:
        demisto.results("No information protection policies found")
def list_ipp(management_group=None):
"""Building query
Args:
        management_group: Management group to pull (if needed)
Returns:
dict: response body
"""
cmd_url = str()
scope_is_subscription = True
if management_group:
cmd_url += "/providers/Microsoft.Management/managementGroups/{}".format(
management_group
)
scope_is_subscription = False
cmd_url += "/providers/Microsoft.Security/informationProtectionPolicies?api-version={}".format(
IPP_API_VERSION
)
response = http_request("GET", cmd_url, add_subscription=scope_is_subscription)
return response
def get_ipp_command(args):
    """Getting Information Protection Policy information
Args:
args (dict): usually demisto.args()
"""
policy_name = args.get("policy_name")
management_group = args.get("management_group")
policy = get_ipp(policy_name, management_group)
properties = policy.get("properties")
labels = properties.get("labels")
if properties and isinstance(labels, dict):
# Basic Property table
labels = ", ".join(
[
(str(label.get("displayName")) + str(label.get("enabled")))
for label in labels.values()
]
)
basic_table_output = [
{"Name": policy.get("name"), "Labels": labels, "ID": policy.get("id")}
]
md = tableToMarkdown(
"Azure Security Center - Get Information Protection Policy - Basic Property",
basic_table_output,
["Name", "Labels", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.InformationProtectionPolicy(val.ID && val.ID === obj.ID)": basic_table_output
}
basic_table_entry = {
"Type": entryTypes["note"],
"Contents": policy,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
# Information Type table
info_type_table_output = list()
for information_type_data in properties.get("informationTypes").values():
keywords = ", ".join(
[
(
str(keyword.get("displayName"))
+ str(keyword.get("custom"))
+ str(keyword.get("canBeNumeric"))
)
for keyword in information_type_data.get("keywords")
]
)
info_type_table_output.append(
                {
                    "DisplayName": information_type_data.get("displayName"),
                    "Enabled": information_type_data.get("enabled"),
                    "Custom": information_type_data.get("custom"),
                    "Keywords": keywords,
                    "RecommendedLabelID": information_type_data.get("recommendedLabelId"),
                }
)
md = tableToMarkdown(
"Azure Security Center - Get Information Protection Policy - Information Types",
info_type_table_output,
["DisplayName", "Enabled", "Custom", "Keywords", "RecommendedLabelID"],
removeNull=True,
)
info_type_table_entry = {
"Type": entryTypes["note"],
"Contents": properties.get("informationTypes"),
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
}
demisto.results([basic_table_entry, info_type_table_entry])
else:
demisto.results("No properties found in {}".format(management_group))
def get_ipp(policy_name, management_group):
"""Building query
Args:
policy_name (str): Policy name
        management_group (str): Management group
Returns:
        dict: response body
"""
cmd_url = ""
score_is_subscription = True
if management_group:
cmd_url += "/providers/Microsoft.Management/managementGroups/{}".format(
management_group
)
score_is_subscription = False
cmd_url += "/providers/Microsoft.Security/informationProtectionPolicies/{}?api-version={}".format(
policy_name, IPP_API_VERSION
)
response = http_request("GET", cmd_url, add_subscription=score_is_subscription)
return response
""" Information Protection Policies End """
""" Jit Network Access Policies Start """
def list_jit_command(args):
"""Lists all Just-in-time Virtual Machines
Args:
args (dict): usually demisto.args()
"""
asc_location = args.get("asc_location")
resource_group_name = args.get("resource_group_name")
policies = list_jit(asc_location, resource_group_name)["value"]
outputs = []
for policy in policies:
# summarize rules in (VMName: allowPort,...) format
if policy.get("properties") and policy.get("properties").get("virtualMachines"):
rules_data = policy["properties"]["virtualMachines"]
rules_summary_array = []
for rule in rules_data:
ID = rule.get("id")
if isinstance(ID, str):
vm_name = ID.split("/")[-1]
else:
vm_name = None
vm_ports = [str(port.get("number")) for port in rule.get("ports")]
rules_summary_array.append(
"({}: {})".format(vm_name, ", ".join(vm_ports))
)
rules = ", ".join(rules_summary_array)
outputs.append(
{
"Name": policy.get("name"),
"Rules": rules,
"Location": policy.get("location"),
"Kind": policy.get("kind"),
"ID": policy.get("id"),
}
)
md = tableToMarkdown(
"Azure Security Center - List JIT Access Policies",
outputs,
["Name", "Rules", "Location", "Kind"],
removeNull=True,
)
ec = {"AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": outputs}
entry = {
"Type": entryTypes["note"],
"Contents": policies,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def list_jit(asc_location, resource_group_name):
"""Building query
Args:
asc_location: Machine location
resource_group_name: Resource group name
Returns:
dict: response body
"""
cmd_url = ""
if resource_group_name:
cmd_url += "/resourceGroups/{}".format(resource_group_name)
if asc_location:
cmd_url += "/providers/Microsoft.Security/locations/{}".format(asc_location)
cmd_url += "/providers/Microsoft.Security/jitNetworkAccessPolicies?api-version={}".format(
JIT_API_VERSION
)
response = http_request("GET", cmd_url)
return response
def get_jit_command(args):
"""Getting given Just-in-time machine
Args:
args (dict): usually demisto.args()
"""
policy_name = args.get("policy_name")
asc_location = args.get("asc_location")
resource_group_name = args.get("resource_group_name")
policy = get_jit(policy_name, asc_location, resource_group_name)
# Property table
property_table_output = [
{
"Name": policy.get("name"),
"Kind": policy.get("kind"),
"ProvisioningState": policy.get("properties").get("provisioningState")
if policy.get("properties")
and policy.get("properties").get("provisioningState")
else None,
"Location": policy.get("location"),
"Rules": policy.get("properties").get("virtualMachines")
if policy.get("properties")
and policy.get("properties").get("virtualMachines")
else None,
"Requests": policy.get("properties").get("requests")
if policy.get("properties") and policy.get("properties").get("requests")
else None,
"ID": policy.get("id"),
}
]
md = tableToMarkdown(
"Azure Security Center - Get JIT Access Policy - Properties",
property_table_output,
["Name", "Kind", "ProvisioningState", "Location", "ID"],
removeNull=True,
)
ec = {
"AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": property_table_output
}
property_table_entry = {
"Type": entryTypes["note"],
"Contents": policy,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
# Rules table
rules_table_output = list()
properties = policy.get("properties")
virtual_machines = properties.get("virtualMachines")
if isinstance(properties, dict) and virtual_machines:
for rule in properties.get("virtualMachines"):
rules_table_output.append(
{
"VmID": rule.get("id"),
"Ports": format_jit_port_rule(rule.get("ports")),
}
)
md = tableToMarkdown(
"Azure Security Center - Get JIT Access Policy - Rules",
rules_table_output,
["VmID", "Ports"],
removeNull=True,
)
rules_table_entry = {
"Type": entryTypes["note"],
"Contents": properties.get("virtualMachines"),
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
}
# Requests table
requests_table_output = list()
for requestData in properties.get("requests"):
vms = list()
for vm in requestData.get("virtualMachines"):
vm_name = vm["id"].split("/")[-1]
vm_ports = format_jit_port_request(vm.get("ports"))
vms.append("[{}: {}]".format(vm_name, vm_ports))
requests_table_output.append(
{
"VirtualMachines": ", ".join(vms),
"Requestor": requestData.get("requestor")
if requestData.get("requestor")
else "service-account",
"StartTimeUtc": requestData.get("startTimeUtc"),
}
)
md = tableToMarkdown(
"Azure Security Center - Get JIT Access Policy - Requests",
requests_table_output,
["VirtualMachines", "Requestor", "StartTimeUtc"],
removeNull=True,
)
requests_table_entry = {
"Type": entryTypes["note"],
"Contents": properties.get("requests"),
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
}
demisto.results([property_table_entry, rules_table_entry, requests_table_entry])
def get_jit(policy_name, asc_location, resource_group_name):
"""Building query
Args:
policy_name: Policy name
asc_location: Machine location
        resource_group_name: Resource group name
Returns:
dict: response body
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Security/locations/{}/jitNetworkAccessPolicies/"
"{}?api-version={}".format(
resource_group_name, asc_location, policy_name, JIT_API_VERSION
)
)
response = http_request("GET", cmd_url)
return response
def initiate_jit_command(args):
resource_group_name = args.get("resource_group_name")
asc_location = args.get("asc_location")
policy_name = args.get("policy_name")
vm_id = args.get("vmID")
port = args.get("port")
source_address = args.get("source_address")
duration = args.get("duration")
response = initiate_jit(
resource_group_name,
asc_location,
policy_name,
vm_id,
port,
source_address,
duration,
)
policy_id = (
"/subscriptions/{}/resourceGroups/{}/providers/"
"Microsoft.Security/locations/{}/jitNetworkAccessPolicies/{}".format(
SUBSCRIPTION_ID, resource_group_name, asc_location, policy_name
)
)
virtual_machines = response.get("virtualMachines")
if virtual_machines and len(virtual_machines) > 0:
machine = virtual_machines[0]
port = machine.get("ports")[0]
outputs = {
"VmID": machine.get("id"),
"PortNum": port.get("number"),
"AllowedSourceAddress": port.get("allowedSourceAddressPrefix"),
"EndTimeUtc": port.get("endTimeUtc"),
"Status": port.get("status"),
"Requestor": response.get("requestor"),
"PolicyID": policy_id,
}
md = tableToMarkdown(
"Azure Security Center - Initiate JIT Access Request",
outputs,
[
"VmID",
"PortNum",
"AllowedSourceAddress",
"EndTimeUtc",
"Status",
"Requestor",
],
removeNull=True,
)
ec = {
"AzureSecurityCenter.JITPolicy(val.ID && val.ID ="
"== obj.{}).Initiate(val.endTimeUtc === obj.EndTimeUtc)".format(
policy_id
): outputs
}
demisto.results(
{
"Type": entryTypes["note"],
"Contents": response,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
)
def initiate_jit(
resource_group_name,
asc_location,
policy_name,
vm_id,
port,
source_address,
duration,
):
"""Starting new Just-in-time machine
Args:
resource_group_name: Resource group name
asc_location: Machine location
policy_name: Policy name
vm_id: Virtual Machine ID
port: ports to be used
source_address: Source address
        duration: Requested duration of access
Returns:
dict: response body
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Security/"
"locations/{}/jitNetworkAccessPolicies/{}/initiate?api-version={}".format(
resource_group_name, asc_location, policy_name, JIT_API_VERSION
)
)
# only supports init access for one vm and one port now
data = {
"virtualMachines": [
            {
                "id": vm_id,
"ports": [
{
"number": port,
"duration": duration,
"allowedSourceAddressPrefix": source_address,
}
],
}
]
}
response = http_request("POST", cmd_url, body=data)
return response
def delete_jit_command(args):
"""Deletes a Just-in-time machine
Args:
args (dict): usually demisto.args()
"""
asc_location = args.get("asc_location")
resource_group_name = args.get("resource_group_name")
policy_name = args.get("policy_name")
delete_jit(asc_location, resource_group_name, policy_name)
policy_id = (
"/subscriptions/{}/resourceGroups/"
"{}/providers/Microsoft.Security/locations/{}/jitNetworkAccessPolicies/{}".format(
SUBSCRIPTION_ID, resource_group_name, asc_location, policy_name
)
)
outputs = {"ID": policy_id, "Action": "deleted"}
ec = {"AzureSecurityCenter.JITPolicy(val.ID && val.ID === obj.ID)": outputs}
demisto.results(
        {
            "Type": entryTypes["note"],
            "Contents": "Policy - {} has been deleted successfully.".format(policy_name),
"ContentsFormat": formats["text"],
"EntryContext": ec,
}
)
def delete_jit(asc_location, resource_group_name, policy_name):
"""Building query
Args:
asc_location: Machine location
resource_group_name: Resource group name
policy_name: Policy name
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Security/"
"locations/{}/jitNetworkAccessPolicies/{}?api-version={}"
"".format(resource_group_name, asc_location, policy_name, JIT_API_VERSION)
)
http_request("DELETE", cmd_url)
""" Jit Network Access Policies End """
""" Storage Start """
# Add this command to security center integration because ATP-related command requires storage account info
def list_sc_storage_command():
"""Listing all Security Center Storages
"""
accounts = list_sc_storage().get("value")
outputs = list()
for account in accounts:
account_id_array = account.get("id", str()).split("/")
resource_group_name = account_id_array[
account_id_array.index("resourceGroups") + 1
]
outputs.append(
{
"Name": account.get("name"),
"ResourceGroupName": resource_group_name,
"Location": account.get("location"),
"ID": account.get("id"),
}
)
md = tableToMarkdown(
"Azure Security Center - List Storage Accounts",
outputs,
["Name", "ResourceGroupName", "Location"],
removeNull=True,
)
ec = {"AzureSecurityCenter.Storage(val.ID && val.ID === obj.ID)": outputs}
entry = {
"Type": entryTypes["note"],
"Contents": accounts,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": md,
"EntryContext": ec,
}
demisto.results(entry)
def list_sc_storage():
"""Building query
Returns:
dict: response body
"""
cmd_url = "/providers/Microsoft.Storage/storageAccounts?api-version={}".format(
STORAGE_API_VERSION
)
response = http_request("GET", cmd_url)
return response
""" Storage End """
""" Functions start """
if not SUBSCRIPTION_ID:
SUBSCRIPTION_ID = set_subscription_id()
SUBSCRIPTION_URL = "/subscriptions/{}".format(SUBSCRIPTION_ID)
try:
if demisto.command() == "test-module":
# If the command will fail, error will be thrown from the request itself
list_locations()
demisto.results("ok")
elif demisto.command() == "azure-sc-get-alert":
get_alert_command(demisto.args())
elif demisto.command() == "azure-sc-list-alert":
list_alerts_command(demisto.args())
elif demisto.command() == "azure-sc-update-alert":
update_alert_command(demisto.args())
elif demisto.command() == "azure-sc-list-location":
list_locations_command()
elif demisto.command() == "azure-sc-update-atp":
update_atp_command(demisto.args())
elif demisto.command() == "azure-sc-get-atp":
get_atp_command(demisto.args())
elif demisto.command() == "azure-sc-update-aps":
update_aps_command(demisto.args())
elif demisto.command() == "azure-sc-list-aps":
list_aps_command()
elif demisto.command() == "azure-sc-get-aps":
get_aps_command(demisto.args())
elif demisto.command() == "azure-sc-list-ipp":
list_ipp_command(demisto.args())
elif demisto.command() == "azure-sc-get-ipp":
get_ipp_command(demisto.args())
elif demisto.command() == "azure-sc-list-jit":
list_jit_command(demisto.args())
elif demisto.command() == "azure-sc-get-jit":
get_jit_command(demisto.args())
elif demisto.command() == "azure-sc-initiate-jit":
initiate_jit_command(demisto.args())
elif demisto.command() == "azure-sc-delete-jit":
delete_jit_command(demisto.args())
elif demisto.command() == "azure-sc-list-storage":
list_sc_storage_command()
except Exception as e:
    LOG(str(e))
LOG.print_log()
raise
|
[] |
[] |
[
"HTTP_PROXY",
"HTTPS_PROXY",
"http_proxy",
"https_proxy"
] |
[]
|
["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"]
|
python
| 4 | 0 | |
service/initial/environment.go
|
package initial
import (
"fmt"
"os"
"github.com/gin-gonic/gin"
"github.com/spf13/viper"
)
//SetEnvironment to set environment
func SetEnvironment() {
getEnv := os.Getenv("GOCUSTOMENV")
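	// Default to Development; GOCUSTOMENV selects production or staging instead.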
viper.Set("Env", "Development")
if len(getEnv) > 0 {
if getEnv == "production" {
viper.Set("Env", "Production")
gin.SetMode("release")
} else if getEnv == "staging" {
viper.Set("Env", "Staging")
gin.SetMode("release")
}
}
fmt.Println("Environment: " + viper.GetString("Env"))
}
|
[
"\"GOCUSTOMENV\""
] |
[] |
[
"GOCUSTOMENV"
] |
[]
|
["GOCUSTOMENV"]
|
go
| 1 | 0 | |
vendor/code.cloudfoundry.org/cli/command/v2/unset_env_command.go
|
package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/command"
)
type UnsetEnvCommand struct {
usage interface{} `usage:"CF_NAME unset-env APP_NAME ENV_VAR_NAME"`
relatedCommands interface{} `related_commands:"apps, env, restart, set-staging-environment-variable-group, set-running-environment-variable-group"`
}
func (_ UnsetEnvCommand) Setup(config command.Config, ui command.UI) error {
return nil
}
func (_ UnsetEnvCommand) Execute(args []string) error {
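	// Delegate to the legacy CLI entry point, forwarding CF_TRACE and the raw arguments.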
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
|
[
"\"CF_TRACE\""
] |
[] |
[
"CF_TRACE"
] |
[]
|
["CF_TRACE"]
|
go
| 1 | 0 | |
samples/apps/manifest/app/main.go
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strings"
)
func main() {
envs := map[string]string{}
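	// Collect the process environment into a key/value map so it can be served as JSON.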
for _, e := range os.Environ() {
x := strings.SplitN(e, "=", 2)
if len(x) != 2 {
continue
}
envs[x[0]] = x[1]
}
log.Fatal(http.ListenAndServe(hostPort(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if err := json.NewEncoder(w).Encode(envs); err != nil {
log.Printf("failed to encode envs: %s", err)
}
})))
}
func hostPort() string {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
return fmt.Sprintf(":%s", port)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
terminfo/mkinfo.go
|
// +build ignore
// Copyright 2019 The TCell Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This command is used to generate suitable configuration files in either
// go syntax or in JSON. It defaults to JSON output on stdout. If no
// term values are specified on the command line, then $TERM is used.
//
// Usage is like this:
//
// mkinfo [-go file.go] [-quiet] [-nofatal] [-I <import>] [-P <pkg>] [<term>...]
//
// -go specifies Go output into the named file. Use - for stdout.
// -nofatal indicates that errors loading definitions should not be fatal
// -P pkg use the supplied package name
// -I import use the named import instead of github.com/gdamore/tcell/terminfo
//
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/gdamore/tcell/terminfo"
)
type termcap struct {
name string
desc string
aliases []string
bools map[string]bool
nums map[string]int
strs map[string]string
}
func (tc *termcap) getnum(s string) int {
return (tc.nums[s])
}
func (tc *termcap) getflag(s string) bool {
return (tc.bools[s])
}
func (tc *termcap) getstr(s string) string {
return (tc.strs[s])
}
const (
NONE = iota
CTRL
ESC
)
var notaddressable = errors.New("terminal not cursor addressable")
func unescape(s string) string {
// Various escapes are in \x format. Control codes are
// encoded as ^M (carat followed by ASCII equivalent).
// Escapes are: \e, \E - escape
// \0 NULL, \n \l \r \t \b \f \s for equivalent C escape.
buf := &bytes.Buffer{}
esc := NONE
for i := 0; i < len(s); i++ {
c := s[i]
switch esc {
case NONE:
switch c {
case '\\':
esc = ESC
case '^':
esc = CTRL
default:
buf.WriteByte(c)
}
case CTRL:
buf.WriteByte(c ^ 1<<6)
esc = NONE
case ESC:
switch c {
case 'E', 'e':
buf.WriteByte(0x1b)
case '0', '1', '2', '3', '4', '5', '6', '7':
if i+2 < len(s) && s[i+1] >= '0' && s[i+1] <= '7' && s[i+2] >= '0' && s[i+2] <= '7' {
buf.WriteByte(((c - '0') * 64) + ((s[i+1] - '0') * 8) + (s[i+2] - '0'))
i = i + 2
} else if c == '0' {
buf.WriteByte(0)
}
case 'n':
buf.WriteByte('\n')
case 'r':
buf.WriteByte('\r')
case 't':
buf.WriteByte('\t')
case 'b':
buf.WriteByte('\b')
case 'f':
buf.WriteByte('\f')
case 's':
buf.WriteByte(' ')
case 'l':
panic("WTF: weird format: " + s)
default:
buf.WriteByte(c)
}
esc = NONE
}
}
return (buf.String())
}
func (tc *termcap) setupterm(name string) error {
cmd := exec.Command("infocmp", "-1", name)
output := &bytes.Buffer{}
cmd.Stdout = output
tc.strs = make(map[string]string)
tc.bools = make(map[string]bool)
tc.nums = make(map[string]int)
err := cmd.Run()
if err != nil {
return err
}
// Now parse the output.
// We get comment lines (starting with "#"), followed by
// a header line that looks like "<name>|<alias>|...|<desc>"
// then capabilities, one per line, starting with a tab and ending
// with a comma and newline.
lines := strings.Split(output.String(), "\n")
for len(lines) > 0 && strings.HasPrefix(lines[0], "#") {
lines = lines[1:]
}
// Ditch trailing empty last line
if lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
header := lines[0]
if strings.HasSuffix(header, ",") {
header = header[:len(header)-1]
}
names := strings.Split(header, "|")
tc.name = names[0]
names = names[1:]
if len(names) > 0 {
tc.desc = names[len(names)-1]
names = names[:len(names)-1]
}
tc.aliases = names
for _, val := range lines[1:] {
if (!strings.HasPrefix(val, "\t")) ||
(!strings.HasSuffix(val, ",")) {
return (errors.New("malformed infocmp: " + val))
}
val = val[1:]
val = val[:len(val)-1]
if k := strings.SplitN(val, "=", 2); len(k) == 2 {
tc.strs[k[0]] = unescape(k[1])
} else if k := strings.SplitN(val, "#", 2); len(k) == 2 {
if u, err := strconv.ParseUint(k[1], 0, 0); err != nil {
return (err)
} else {
tc.nums[k[0]] = int(u)
}
} else {
tc.bools[val] = true
}
}
return nil
}
// This program is used to collect data from the system's terminfo library,
// and write it into Go source code. That is, we maintain our terminfo
// capabilities encoded in the program. It should never need to be run by
// an end user, but developers can use this to add codes for additional
// terminal types.
func getinfo(name string) (*terminfo.Terminfo, string, error) {
var tc termcap
	if err := tc.setupterm(name); err != nil {
		return nil, "", err
	}
t := &terminfo.Terminfo{}
// If this is an alias record, then just emit the alias
t.Name = tc.name
if t.Name != name {
return t, "", nil
}
t.Aliases = tc.aliases
t.Colors = tc.getnum("colors")
t.Columns = tc.getnum("cols")
t.Lines = tc.getnum("lines")
t.Bell = tc.getstr("bel")
t.Clear = tc.getstr("clear")
t.EnterCA = tc.getstr("smcup")
t.ExitCA = tc.getstr("rmcup")
t.ShowCursor = tc.getstr("cnorm")
t.HideCursor = tc.getstr("civis")
t.AttrOff = tc.getstr("sgr0")
t.Underline = tc.getstr("smul")
t.Bold = tc.getstr("bold")
t.Blink = tc.getstr("blink")
t.Dim = tc.getstr("dim")
t.Reverse = tc.getstr("rev")
t.EnterKeypad = tc.getstr("smkx")
t.ExitKeypad = tc.getstr("rmkx")
t.SetFg = tc.getstr("setaf")
t.SetBg = tc.getstr("setab")
t.SetCursor = tc.getstr("cup")
t.CursorBack1 = tc.getstr("cub1")
t.CursorUp1 = tc.getstr("cuu1")
t.KeyF1 = tc.getstr("kf1")
t.KeyF2 = tc.getstr("kf2")
t.KeyF3 = tc.getstr("kf3")
t.KeyF4 = tc.getstr("kf4")
t.KeyF5 = tc.getstr("kf5")
t.KeyF6 = tc.getstr("kf6")
t.KeyF7 = tc.getstr("kf7")
t.KeyF8 = tc.getstr("kf8")
t.KeyF9 = tc.getstr("kf9")
t.KeyF10 = tc.getstr("kf10")
t.KeyF11 = tc.getstr("kf11")
t.KeyF12 = tc.getstr("kf12")
t.KeyF13 = tc.getstr("kf13")
t.KeyF14 = tc.getstr("kf14")
t.KeyF15 = tc.getstr("kf15")
t.KeyF16 = tc.getstr("kf16")
t.KeyF17 = tc.getstr("kf17")
t.KeyF18 = tc.getstr("kf18")
t.KeyF19 = tc.getstr("kf19")
t.KeyF20 = tc.getstr("kf20")
t.KeyF21 = tc.getstr("kf21")
t.KeyF22 = tc.getstr("kf22")
t.KeyF23 = tc.getstr("kf23")
t.KeyF24 = tc.getstr("kf24")
t.KeyF25 = tc.getstr("kf25")
t.KeyF26 = tc.getstr("kf26")
t.KeyF27 = tc.getstr("kf27")
t.KeyF28 = tc.getstr("kf28")
t.KeyF29 = tc.getstr("kf29")
t.KeyF30 = tc.getstr("kf30")
t.KeyF31 = tc.getstr("kf31")
t.KeyF32 = tc.getstr("kf32")
t.KeyF33 = tc.getstr("kf33")
t.KeyF34 = tc.getstr("kf34")
t.KeyF35 = tc.getstr("kf35")
t.KeyF36 = tc.getstr("kf36")
t.KeyF37 = tc.getstr("kf37")
t.KeyF38 = tc.getstr("kf38")
t.KeyF39 = tc.getstr("kf39")
t.KeyF40 = tc.getstr("kf40")
t.KeyF41 = tc.getstr("kf41")
t.KeyF42 = tc.getstr("kf42")
t.KeyF43 = tc.getstr("kf43")
t.KeyF44 = tc.getstr("kf44")
t.KeyF45 = tc.getstr("kf45")
t.KeyF46 = tc.getstr("kf46")
t.KeyF47 = tc.getstr("kf47")
t.KeyF48 = tc.getstr("kf48")
t.KeyF49 = tc.getstr("kf49")
t.KeyF50 = tc.getstr("kf50")
t.KeyF51 = tc.getstr("kf51")
t.KeyF52 = tc.getstr("kf52")
t.KeyF53 = tc.getstr("kf53")
t.KeyF54 = tc.getstr("kf54")
t.KeyF55 = tc.getstr("kf55")
t.KeyF56 = tc.getstr("kf56")
t.KeyF57 = tc.getstr("kf57")
t.KeyF58 = tc.getstr("kf58")
t.KeyF59 = tc.getstr("kf59")
t.KeyF60 = tc.getstr("kf60")
t.KeyF61 = tc.getstr("kf61")
t.KeyF62 = tc.getstr("kf62")
t.KeyF63 = tc.getstr("kf63")
t.KeyF64 = tc.getstr("kf64")
t.KeyInsert = tc.getstr("kich1")
t.KeyDelete = tc.getstr("kdch1")
t.KeyBackspace = tc.getstr("kbs")
t.KeyHome = tc.getstr("khome")
t.KeyEnd = tc.getstr("kend")
t.KeyUp = tc.getstr("kcuu1")
t.KeyDown = tc.getstr("kcud1")
t.KeyRight = tc.getstr("kcuf1")
t.KeyLeft = tc.getstr("kcub1")
t.KeyPgDn = tc.getstr("knp")
t.KeyPgUp = tc.getstr("kpp")
t.KeyBacktab = tc.getstr("kcbt")
t.KeyExit = tc.getstr("kext")
t.KeyCancel = tc.getstr("kcan")
t.KeyPrint = tc.getstr("kprt")
t.KeyHelp = tc.getstr("khlp")
t.KeyClear = tc.getstr("kclr")
t.AltChars = tc.getstr("acsc")
t.EnterAcs = tc.getstr("smacs")
t.ExitAcs = tc.getstr("rmacs")
t.EnableAcs = tc.getstr("enacs")
t.Mouse = tc.getstr("kmous")
t.KeyShfRight = tc.getstr("kRIT")
t.KeyShfLeft = tc.getstr("kLFT")
t.KeyShfHome = tc.getstr("kHOM")
t.KeyShfEnd = tc.getstr("kEND")
// Terminfo lacks descriptions for a bunch of modified keys,
// but modern XTerm and emulators often have them. Let's add them,
// if the shifted right and left arrows are defined.
if t.KeyShfRight == "\x1b[1;2C" && t.KeyShfLeft == "\x1b[1;2D" {
t.KeyShfUp = "\x1b[1;2A"
t.KeyShfDown = "\x1b[1;2B"
t.KeyMetaUp = "\x1b[1;9A"
t.KeyMetaDown = "\x1b[1;9B"
t.KeyMetaRight = "\x1b[1;9C"
t.KeyMetaLeft = "\x1b[1;9D"
t.KeyAltUp = "\x1b[1;3A"
t.KeyAltDown = "\x1b[1;3B"
t.KeyAltRight = "\x1b[1;3C"
t.KeyAltLeft = "\x1b[1;3D"
t.KeyCtrlUp = "\x1b[1;5A"
t.KeyCtrlDown = "\x1b[1;5B"
t.KeyCtrlRight = "\x1b[1;5C"
t.KeyCtrlLeft = "\x1b[1;5D"
t.KeyAltShfUp = "\x1b[1;4A"
t.KeyAltShfDown = "\x1b[1;4B"
t.KeyAltShfRight = "\x1b[1;4C"
t.KeyAltShfLeft = "\x1b[1;4D"
t.KeyMetaShfUp = "\x1b[1;10A"
t.KeyMetaShfDown = "\x1b[1;10B"
t.KeyMetaShfRight = "\x1b[1;10C"
t.KeyMetaShfLeft = "\x1b[1;10D"
t.KeyCtrlShfUp = "\x1b[1;6A"
t.KeyCtrlShfDown = "\x1b[1;6B"
t.KeyCtrlShfRight = "\x1b[1;6C"
t.KeyCtrlShfLeft = "\x1b[1;6D"
}
// And also for Home and End
if t.KeyShfHome == "\x1b[1;2H" && t.KeyShfEnd == "\x1b[1;2F" {
t.KeyCtrlHome = "\x1b[1;5H"
t.KeyCtrlEnd = "\x1b[1;5F"
t.KeyAltHome = "\x1b[1;9H"
t.KeyAltEnd = "\x1b[1;9F"
t.KeyCtrlShfHome = "\x1b[1;6H"
t.KeyCtrlShfEnd = "\x1b[1;6F"
t.KeyAltShfHome = "\x1b[1;4H"
t.KeyAltShfEnd = "\x1b[1;4F"
t.KeyMetaShfHome = "\x1b[1;10H"
t.KeyMetaShfEnd = "\x1b[1;10F"
}
// And the same thing for rxvt and workalikes (Eterm, aterm, etc.)
	// It seems that urxvt at least sends ESC as ALT prefix for these,
	// although some places seem to indicate a separate ALT key sequence.
if t.KeyShfRight == "\x1b[c" && t.KeyShfLeft == "\x1b[d" {
t.KeyShfUp = "\x1b[a"
t.KeyShfDown = "\x1b[b"
t.KeyCtrlUp = "\x1b[Oa"
t.KeyCtrlDown = "\x1b[Ob"
t.KeyCtrlRight = "\x1b[Oc"
t.KeyCtrlLeft = "\x1b[Od"
}
if t.KeyShfHome == "\x1b[7$" && t.KeyShfEnd == "\x1b[8$" {
t.KeyCtrlHome = "\x1b[7^"
t.KeyCtrlEnd = "\x1b[8^"
}
	// If the kmous entry is present, then we need to record
// the codes to enter and exit mouse mode. Sadly, this is not
// part of the terminfo databases anywhere that I've found, but
// is an extension. The escape codes are documented in the XTerm
// manual, and all terminals that have kmous are expected to
// use these same codes, unless explicitly configured otherwise
	// via XM. Note that in any event, we only know how to parse either
// x11 or SGR mouse events -- if your terminal doesn't support one
	// of these two forms, you may be out of luck.
t.MouseMode = tc.getstr("XM")
if t.Mouse != "" && t.MouseMode == "" {
// we anticipate that all xterm mouse tracking compatible
// terminals understand mouse tracking (1000), but we hope
// that those that don't understand any-event tracking (1003)
// will at least ignore it. Likewise we hope that terminals
// that don't understand SGR reporting (1006) just ignore it.
t.MouseMode = "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;" +
"\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c"
}
// We only support colors in ANSI 8 or 256 color mode.
if t.Colors < 8 || t.SetFg == "" {
t.Colors = 0
}
if t.SetCursor == "" {
return nil, "", notaddressable
}
// For padding, we lookup the pad char. If that isn't present,
// and npc is *not* set, then we assume a null byte.
t.PadChar = tc.getstr("pad")
if t.PadChar == "" {
if !tc.getflag("npc") {
t.PadChar = "\u0000"
}
}
	// For terminals that use "standard" SGR sequences, let's combine the
// foreground and background together.
if strings.HasPrefix(t.SetFg, "\x1b[") &&
strings.HasPrefix(t.SetBg, "\x1b[") &&
strings.HasSuffix(t.SetFg, "m") &&
strings.HasSuffix(t.SetBg, "m") {
fg := t.SetFg[:len(t.SetFg)-1]
r := regexp.MustCompile("%p1")
bg := r.ReplaceAllString(t.SetBg[2:], "%p2")
t.SetFgBg = fg + ";" + bg
}
return t, tc.desc, nil
}
func dotGoAddInt(w io.Writer, n string, i int) {
if i == 0 {
// initialized to 0, ignore
return
}
fmt.Fprintf(w, "\t\t%-13s %d,\n", n+":", i)
}
func dotGoAddStr(w io.Writer, n string, s string) {
if s == "" {
return
}
fmt.Fprintf(w, "\t\t%-13s %q,\n", n+":", s)
}
func dotGoAddArr(w io.Writer, n string, a []string) {
if len(a) == 0 {
return
}
fmt.Fprintf(w, "\t\t%-13s []string{", n+":")
did := false
for _, b := range a {
if did {
fmt.Fprint(w, ", ")
}
did = true
fmt.Fprintf(w, "%q", b)
}
fmt.Fprintln(w, "},")
}
func dotGoHeader(w io.Writer, packname, tipackname string) {
fmt.Fprintln(w, "// Generated automatically. DO NOT HAND-EDIT.")
fmt.Fprintln(w, "")
fmt.Fprintf(w, "package %s\n", packname)
fmt.Fprintln(w, "")
fmt.Fprintf(w, "import \"%s\"\n", tipackname)
fmt.Fprintln(w, "")
}
func dotGoTrailer(w io.Writer) {
}
func dotGoInfo(w io.Writer, terms []*TData) {
fmt.Fprintln(w, "func init() {")
for _, t := range terms {
fmt.Fprintf(w, "\n\t// %s\n", t.Desc)
fmt.Fprintln(w, "\tterminfo.AddTerminfo(&terminfo.Terminfo{")
dotGoAddStr(w, "Name", t.Name)
dotGoAddArr(w, "Aliases", t.Aliases)
dotGoAddInt(w, "Columns", t.Columns)
dotGoAddInt(w, "Lines", t.Lines)
dotGoAddInt(w, "Colors", t.Colors)
dotGoAddStr(w, "Bell", t.Bell)
dotGoAddStr(w, "Clear", t.Clear)
dotGoAddStr(w, "EnterCA", t.EnterCA)
dotGoAddStr(w, "ExitCA", t.ExitCA)
dotGoAddStr(w, "ShowCursor", t.ShowCursor)
dotGoAddStr(w, "HideCursor", t.HideCursor)
dotGoAddStr(w, "AttrOff", t.AttrOff)
dotGoAddStr(w, "Underline", t.Underline)
dotGoAddStr(w, "Bold", t.Bold)
dotGoAddStr(w, "Dim", t.Dim)
dotGoAddStr(w, "Blink", t.Blink)
dotGoAddStr(w, "Reverse", t.Reverse)
dotGoAddStr(w, "EnterKeypad", t.EnterKeypad)
dotGoAddStr(w, "ExitKeypad", t.ExitKeypad)
dotGoAddStr(w, "SetFg", t.SetFg)
dotGoAddStr(w, "SetBg", t.SetBg)
dotGoAddStr(w, "SetFgBg", t.SetFgBg)
dotGoAddStr(w, "PadChar", t.PadChar)
dotGoAddStr(w, "AltChars", t.AltChars)
dotGoAddStr(w, "EnterAcs", t.EnterAcs)
dotGoAddStr(w, "ExitAcs", t.ExitAcs)
dotGoAddStr(w, "EnableAcs", t.EnableAcs)
dotGoAddStr(w, "SetFgRGB", t.SetFgRGB)
dotGoAddStr(w, "SetBgRGB", t.SetBgRGB)
dotGoAddStr(w, "SetFgBgRGB", t.SetFgBgRGB)
dotGoAddStr(w, "Mouse", t.Mouse)
dotGoAddStr(w, "MouseMode", t.MouseMode)
dotGoAddStr(w, "SetCursor", t.SetCursor)
dotGoAddStr(w, "CursorBack1", t.CursorBack1)
dotGoAddStr(w, "CursorUp1", t.CursorUp1)
dotGoAddStr(w, "KeyUp", t.KeyUp)
dotGoAddStr(w, "KeyDown", t.KeyDown)
dotGoAddStr(w, "KeyRight", t.KeyRight)
dotGoAddStr(w, "KeyLeft", t.KeyLeft)
dotGoAddStr(w, "KeyInsert", t.KeyInsert)
dotGoAddStr(w, "KeyDelete", t.KeyDelete)
dotGoAddStr(w, "KeyBackspace", t.KeyBackspace)
dotGoAddStr(w, "KeyHome", t.KeyHome)
dotGoAddStr(w, "KeyEnd", t.KeyEnd)
dotGoAddStr(w, "KeyPgUp", t.KeyPgUp)
dotGoAddStr(w, "KeyPgDn", t.KeyPgDn)
dotGoAddStr(w, "KeyF1", t.KeyF1)
dotGoAddStr(w, "KeyF2", t.KeyF2)
dotGoAddStr(w, "KeyF3", t.KeyF3)
dotGoAddStr(w, "KeyF4", t.KeyF4)
dotGoAddStr(w, "KeyF5", t.KeyF5)
dotGoAddStr(w, "KeyF6", t.KeyF6)
dotGoAddStr(w, "KeyF7", t.KeyF7)
dotGoAddStr(w, "KeyF8", t.KeyF8)
dotGoAddStr(w, "KeyF9", t.KeyF9)
dotGoAddStr(w, "KeyF10", t.KeyF10)
dotGoAddStr(w, "KeyF11", t.KeyF11)
dotGoAddStr(w, "KeyF12", t.KeyF12)
dotGoAddStr(w, "KeyF13", t.KeyF13)
dotGoAddStr(w, "KeyF14", t.KeyF14)
dotGoAddStr(w, "KeyF15", t.KeyF15)
dotGoAddStr(w, "KeyF16", t.KeyF16)
dotGoAddStr(w, "KeyF17", t.KeyF17)
dotGoAddStr(w, "KeyF18", t.KeyF18)
dotGoAddStr(w, "KeyF19", t.KeyF19)
dotGoAddStr(w, "KeyF20", t.KeyF20)
dotGoAddStr(w, "KeyF21", t.KeyF21)
dotGoAddStr(w, "KeyF22", t.KeyF22)
dotGoAddStr(w, "KeyF23", t.KeyF23)
dotGoAddStr(w, "KeyF24", t.KeyF24)
dotGoAddStr(w, "KeyF25", t.KeyF25)
dotGoAddStr(w, "KeyF26", t.KeyF26)
dotGoAddStr(w, "KeyF27", t.KeyF27)
dotGoAddStr(w, "KeyF28", t.KeyF28)
dotGoAddStr(w, "KeyF29", t.KeyF29)
dotGoAddStr(w, "KeyF30", t.KeyF30)
dotGoAddStr(w, "KeyF31", t.KeyF31)
dotGoAddStr(w, "KeyF32", t.KeyF32)
dotGoAddStr(w, "KeyF33", t.KeyF33)
dotGoAddStr(w, "KeyF34", t.KeyF34)
dotGoAddStr(w, "KeyF35", t.KeyF35)
dotGoAddStr(w, "KeyF36", t.KeyF36)
dotGoAddStr(w, "KeyF37", t.KeyF37)
dotGoAddStr(w, "KeyF38", t.KeyF38)
dotGoAddStr(w, "KeyF39", t.KeyF39)
dotGoAddStr(w, "KeyF40", t.KeyF40)
dotGoAddStr(w, "KeyF41", t.KeyF41)
dotGoAddStr(w, "KeyF42", t.KeyF42)
dotGoAddStr(w, "KeyF43", t.KeyF43)
dotGoAddStr(w, "KeyF44", t.KeyF44)
dotGoAddStr(w, "KeyF45", t.KeyF45)
dotGoAddStr(w, "KeyF46", t.KeyF46)
dotGoAddStr(w, "KeyF47", t.KeyF47)
dotGoAddStr(w, "KeyF48", t.KeyF48)
dotGoAddStr(w, "KeyF49", t.KeyF49)
dotGoAddStr(w, "KeyF50", t.KeyF50)
dotGoAddStr(w, "KeyF51", t.KeyF51)
dotGoAddStr(w, "KeyF52", t.KeyF52)
dotGoAddStr(w, "KeyF53", t.KeyF53)
dotGoAddStr(w, "KeyF54", t.KeyF54)
dotGoAddStr(w, "KeyF55", t.KeyF55)
dotGoAddStr(w, "KeyF56", t.KeyF56)
dotGoAddStr(w, "KeyF57", t.KeyF57)
dotGoAddStr(w, "KeyF58", t.KeyF58)
dotGoAddStr(w, "KeyF59", t.KeyF59)
dotGoAddStr(w, "KeyF60", t.KeyF60)
dotGoAddStr(w, "KeyF61", t.KeyF61)
dotGoAddStr(w, "KeyF62", t.KeyF62)
dotGoAddStr(w, "KeyF63", t.KeyF63)
dotGoAddStr(w, "KeyF64", t.KeyF64)
dotGoAddStr(w, "KeyCancel", t.KeyCancel)
dotGoAddStr(w, "KeyPrint", t.KeyPrint)
dotGoAddStr(w, "KeyExit", t.KeyExit)
dotGoAddStr(w, "KeyHelp", t.KeyHelp)
dotGoAddStr(w, "KeyClear", t.KeyClear)
dotGoAddStr(w, "KeyBacktab", t.KeyBacktab)
dotGoAddStr(w, "KeyShfLeft", t.KeyShfLeft)
dotGoAddStr(w, "KeyShfRight", t.KeyShfRight)
dotGoAddStr(w, "KeyShfUp", t.KeyShfUp)
dotGoAddStr(w, "KeyShfDown", t.KeyShfDown)
dotGoAddStr(w, "KeyCtrlLeft", t.KeyCtrlLeft)
dotGoAddStr(w, "KeyCtrlRight", t.KeyCtrlRight)
dotGoAddStr(w, "KeyCtrlUp", t.KeyCtrlUp)
dotGoAddStr(w, "KeyCtrlDown", t.KeyCtrlDown)
dotGoAddStr(w, "KeyMetaLeft", t.KeyMetaLeft)
dotGoAddStr(w, "KeyMetaRight", t.KeyMetaRight)
dotGoAddStr(w, "KeyMetaUp", t.KeyMetaUp)
dotGoAddStr(w, "KeyMetaDown", t.KeyMetaDown)
dotGoAddStr(w, "KeyAltLeft", t.KeyAltLeft)
dotGoAddStr(w, "KeyAltRight", t.KeyAltRight)
dotGoAddStr(w, "KeyAltUp", t.KeyAltUp)
dotGoAddStr(w, "KeyAltDown", t.KeyAltDown)
dotGoAddStr(w, "KeyAltShfLeft", t.KeyAltShfLeft)
dotGoAddStr(w, "KeyAltShfRight", t.KeyAltShfRight)
dotGoAddStr(w, "KeyAltShfUp", t.KeyAltShfUp)
dotGoAddStr(w, "KeyAltShfDown", t.KeyAltShfDown)
dotGoAddStr(w, "KeyMetaShfLeft", t.KeyMetaShfLeft)
dotGoAddStr(w, "KeyMetaShfRight", t.KeyMetaShfRight)
dotGoAddStr(w, "KeyMetaShfUp", t.KeyMetaShfUp)
dotGoAddStr(w, "KeyMetaShfDown", t.KeyMetaShfDown)
dotGoAddStr(w, "KeyCtrlShfLeft", t.KeyCtrlShfLeft)
dotGoAddStr(w, "KeyCtrlShfRight", t.KeyCtrlShfRight)
dotGoAddStr(w, "KeyCtrlShfUp", t.KeyCtrlShfUp)
dotGoAddStr(w, "KeyCtrlShfDown", t.KeyCtrlShfDown)
dotGoAddStr(w, "KeyShfHome", t.KeyShfHome)
dotGoAddStr(w, "KeyShfEnd", t.KeyShfEnd)
dotGoAddStr(w, "KeyCtrlHome", t.KeyCtrlHome)
dotGoAddStr(w, "KeyCtrlEnd", t.KeyCtrlEnd)
dotGoAddStr(w, "KeyMetaHome", t.KeyMetaHome)
dotGoAddStr(w, "KeyMetaEnd", t.KeyMetaEnd)
dotGoAddStr(w, "KeyAltHome", t.KeyAltHome)
dotGoAddStr(w, "KeyAltEnd", t.KeyAltEnd)
dotGoAddStr(w, "KeyCtrlShfHome", t.KeyCtrlShfHome)
dotGoAddStr(w, "KeyCtrlShfEnd", t.KeyCtrlShfEnd)
dotGoAddStr(w, "KeyMetaShfHome", t.KeyMetaShfHome)
dotGoAddStr(w, "KeyMetaShfEnd", t.KeyMetaShfEnd)
dotGoAddStr(w, "KeyAltShfHome", t.KeyAltShfHome)
dotGoAddStr(w, "KeyAltShfEnd", t.KeyAltShfEnd)
fmt.Fprintln(w, "\t})")
}
fmt.Fprintln(w, "}")
}
var packname = ""
var tipackname = "github.com/gdamore/tcell/terminfo"
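// dotGoFile writes the generated Go source for the given terminals to fname
// (or to stdout when fname is "-" or empty) and then runs go fmt on the result.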
func dotGoFile(fname string, terms []*TData) error {
w := os.Stdout
var e error
if fname != "-" && fname != "" {
if w, e = os.Create(fname); e != nil {
return e
}
}
if packname == "" {
packname = strings.Replace(terms[0].Name, "-", "_", -1)
}
dotGoHeader(w, packname, tipackname)
dotGoInfo(w, terms)
dotGoTrailer(w)
if w != os.Stdout {
w.Close()
}
cmd := exec.Command("go", "fmt", fname)
cmd.Run()
return nil
}
type TData struct {
Desc string
terminfo.Terminfo
}
func main() {
gofile := ""
nofatal := false
quiet := false
all := false
flag.StringVar(&gofile, "go", "", "generate go source in named file")
flag.StringVar(&tipackname, "I", tipackname, "import package path")
flag.StringVar(&packname, "P", packname, "package name (go source)")
flag.BoolVar(&nofatal, "nofatal", false, "errors are not fatal")
flag.BoolVar(&quiet, "quiet", false, "suppress error messages")
flag.BoolVar(&all, "all", false, "load all terminals from terminfo")
flag.Parse()
var e error
args := flag.Args()
if len(args) == 0 {
args = []string{os.Getenv("TERM")}
}
tdata := make([]*TData, 0)
for _, term := range args {
if t, desc, e := getinfo(term); e != nil {
if all && e == notaddressable {
continue
}
if !quiet {
fmt.Fprintf(os.Stderr,
"Failed loading %s: %v\n", term, e)
}
if !nofatal {
os.Exit(1)
}
} else {
tdata = append(tdata, &TData{
Desc: desc,
Terminfo: *t,
})
}
}
if len(tdata) == 0 {
// No data.
os.Exit(0)
}
e = dotGoFile(gofile, tdata)
if e != nil {
fmt.Fprintf(os.Stderr, "Failed %s: %v", gofile, e)
os.Exit(1)
}
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
busman/wsgi.py
|
"""
WSGI config for busman project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'busman.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/nearby/main.go
|
package main
import (
"floqars/internal/nearbyserver"
"floqars/rpc/nearby"
"floqars/shared"
"log"
"net/http"
"os"
)
func main() {
shared.Connect()
port := os.Getenv("PORT")
if port == "" {
log.Fatal("$PORT must be set")
}
server := &nearbyserver.NearbyServer{}
twirpHandler := nearby.NewNearbyServer(server, nil)
http.ListenAndServe(":"+port, twirpHandler)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
shapely/geos.py
|
"""
Proxies for libgeos, GEOS-specific exceptions, and utilities
"""
import atexit
from ctypes import (
CDLL, cdll, pointer, string_at, DEFAULT_MODE, c_void_p, c_size_t, c_char_p)
from ctypes.util import find_library
import glob
import logging
import os
import re
import sys
import threading
from functools import partial
from .ctypes_declarations import prototype, EXCEPTION_HANDLER_FUNCTYPE
from .errors import InvalidGeometryError, WKBReadingError, WKTReadingError, TopologicalError, PredicateError
# Add message handler to this module's logger
LOG = logging.getLogger(__name__)
# Find and load the GEOS and C libraries
# If this ever gets any longer, we'll break it into separate modules
def load_dll(libname, fallbacks=None, mode=DEFAULT_MODE):
lib = find_library(libname)
dll = None
if lib is not None:
try:
LOG.debug("Trying `CDLL(%s)`", lib)
dll = CDLL(lib, mode=mode)
except OSError:
LOG.debug("Failed `CDLL(%s)`", lib)
pass
if not dll and fallbacks is not None:
for name in fallbacks:
try:
LOG.debug("Trying `CDLL(%s)`", name)
dll = CDLL(name, mode=mode)
except OSError:
# move on to the next fallback
LOG.debug("Failed `CDLL(%s)`", name)
pass
if dll:
LOG.debug("Library path: %r", lib or name)
LOG.debug("DLL: %r", dll)
return dll
else:
# No shared library was loaded. Raise OSError.
raise OSError(
"Could not find lib {} or load any of its variants {}.".format(
libname, fallbacks or []))
_lgeos = None
def exists_conda_env():
"""Does this module exist in a conda environment?"""
return os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
if sys.platform.startswith('linux'):
# Test to see if we have a wheel repaired by auditwheel which contains its
# own libgeos_c. Note: auditwheel 3.1 changed the location of libs.
geos_whl_so = glob.glob(
os.path.abspath(os.path.join(os.path.dirname(__file__), ".libs/libgeos*.so*"))
) or glob.glob(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), "..", "Shapely.libs", "libgeos*.so*"
)
)
)
if len(geos_whl_so) > 0:
# We have observed problems with CDLL of libgeos_c not automatically
# loading the sibling c++ library since the change made by auditwheel
# 3.1, so we explicitly load them both.
geos_whl_so = sorted(geos_whl_so)
CDLL(geos_whl_so[0])
_lgeos = CDLL(geos_whl_so[-1])
LOG.debug("Found GEOS DLL: %r, using it.", _lgeos)
elif hasattr(sys, 'frozen'):
geos_pyinstaller_so = glob.glob(os.path.join(sys.prefix, 'libgeos_c-*.so.*'))
if len(geos_pyinstaller_so) >= 1:
_lgeos = CDLL(geos_pyinstaller_so[0])
LOG.debug("Found GEOS DLL: %r, using it.", _lgeos)
elif exists_conda_env():
# conda package.
_lgeos = CDLL(os.path.join(sys.prefix, 'lib', 'libgeos_c.so'))
else:
alt_paths = [
'libgeos_c.so.1',
'libgeos_c.so',
]
_lgeos = load_dll('libgeos_c', fallbacks=alt_paths)
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc Python is actually using.
free = CDLL(None).free
free.argtypes = [c_void_p]
free.restype = None
elif sys.platform == 'darwin':
# Test to see if we have a delocated wheel with a GEOS dylib.
geos_whl_dylib = os.path.abspath(os.path.join(os.path.dirname(
__file__), '.dylibs/libgeos_c.1.dylib'))
if os.path.exists(geos_whl_dylib):
handle = CDLL(None)
if hasattr(handle, "initGEOS_r"):
LOG.debug("GEOS already loaded")
_lgeos = handle
else:
_lgeos = CDLL(geos_whl_dylib)
LOG.debug("Found GEOS DLL: %r, using it.", _lgeos)
elif exists_conda_env():
# conda package.
_lgeos = CDLL(os.path.join(sys.prefix, 'lib', 'libgeos_c.dylib'))
else:
if hasattr(sys, 'frozen'):
try:
# .app file from py2app
alt_paths = [os.path.join(
os.environ['RESOURCEPATH'], '..', 'Frameworks',
'libgeos_c.dylib')]
except KeyError:
alt_paths = [
# binary from pyinstaller
os.path.join(sys.executable, 'libgeos_c.dylib'),
# .app from cx_Freeze
os.path.join(os.path.dirname(sys.executable), 'libgeos_c.1.dylib')]
if hasattr(sys, '_MEIPASS'):
alt_paths.append(
os.path.join(sys._MEIPASS, 'libgeos_c.1.dylib'))
else:
alt_paths = [
# The Framework build from Kyng Chaos
"/Library/Frameworks/GEOS.framework/Versions/Current/GEOS",
# macports
'/opt/local/lib/libgeos_c.dylib',
# homebrew Intel
'/usr/local/lib/libgeos_c.dylib',
# homebrew Apple Silicon
'/opt/homebrew/lib/libgeos_c.dylib',
]
_lgeos = load_dll('geos_c', fallbacks=alt_paths)
free = CDLL(None).free
free.argtypes = [c_void_p]
free.restype = None
elif sys.platform == 'win32':
_conda_dll_path = os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll')
if exists_conda_env() and os.path.exists(_conda_dll_path):
# conda package.
_lgeos = CDLL(_conda_dll_path)
else:
try:
egg_dlls = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'DLLs'))
if hasattr(sys, '_MEIPASS'):
wininst_dlls = sys._MEIPASS
elif hasattr(sys, "frozen"):
wininst_dlls = os.path.normpath(
os.path.abspath(sys.executable + '../../DLLS'))
else:
wininst_dlls = os.path.abspath(os.__file__ + "../../../DLLs")
original_path = os.environ['PATH']
os.environ['PATH'] = "%s;%s;%s" % \
(egg_dlls, wininst_dlls, original_path)
_lgeos = load_dll("geos_c.dll")
except (ImportError, WindowsError, OSError):
raise
def free(m):
try:
cdll.msvcrt.free(m)
except WindowsError:
# XXX: See http://trac.gispython.org/projects/PCL/ticket/149
pass
elif sys.platform == 'sunos5':
_lgeos = load_dll('geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])
free = CDLL('libc.so.1').free
free.argtypes = [c_void_p]
free.restype = None
else: # other *nix systems
_lgeos = load_dll('geos_c', fallbacks=['libgeos_c.so.1', 'libgeos_c.so'])
free = CDLL(None).free
free.argtypes = [c_void_p]
free.restype = None
def _geos_version():
GEOSversion = _lgeos.GEOSversion
GEOSversion.restype = c_char_p
GEOSversion.argtypes = []
geos_version_string = GEOSversion().decode('ascii')
res = re.findall(r'(\d+)\.(\d+)\.(\d+)', geos_version_string)
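# The version string is expected to contain two x.y.z triples: the GEOS
# library version and the C API version.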
assert len(res) == 2, res
geos_version = tuple(int(x) for x in res[0])
capi_version = tuple(int(x) for x in res[1])
return geos_version_string, geos_version, capi_version
geos_version_string, geos_version, geos_capi_version = _geos_version()
# Record a baseline so that we know what additional functions are declared
# in ctypes_declarations.
start_set = set(_lgeos.__dict__)
# Apply prototypes for the libgeos_c functions
prototype(_lgeos, geos_version)
# Automatically detect all function declarations, and declare their
# re-entrant counterpart.
end_set = set(_lgeos.__dict__)
new_func_names = end_set - start_set
for func_name in new_func_names:
new_func_name = "%s_r" % func_name
if hasattr(_lgeos, new_func_name):
new_func = getattr(_lgeos, new_func_name)
old_func = getattr(_lgeos, func_name)
new_func.restype = old_func.restype
if old_func.argtypes is None:
# Handle functions that didn't take an argument before,
# finishGEOS.
new_func.argtypes = [c_void_p]
else:
new_func.argtypes = [c_void_p] + list(old_func.argtypes)
if old_func.errcheck is not None:
new_func.errcheck = old_func.errcheck
# Handle special case.
_lgeos.initGEOS_r.restype = c_void_p
_lgeos.initGEOS_r.argtypes = \
[EXCEPTION_HANDLER_FUNCTYPE, EXCEPTION_HANDLER_FUNCTYPE]
_lgeos.finishGEOS_r.argtypes = [c_void_p]
def make_logging_callback(func):
"""Error or notice handler callback producr
Wraps a logger method, func, as a GEOS callback.
"""
def callback(fmt, *fmt_args):
fmt = fmt.decode('ascii')
conversions = re.findall(r'%.', fmt)
args = [
string_at(arg).decode('ascii')
for spec, arg in zip(conversions, fmt_args)
if spec == '%s' and arg is not None]
func(fmt, *args)
return callback
error_handler = make_logging_callback(LOG.error)
notice_handler = make_logging_callback(LOG.info)
error_h = EXCEPTION_HANDLER_FUNCTYPE(error_handler)
notice_h = EXCEPTION_HANDLER_FUNCTYPE(notice_handler)
class WKTReader:
_lgeos = None
_reader = None
def __init__(self, lgeos):
"""Create WKT Reader"""
self._lgeos = lgeos
self._reader = self._lgeos.GEOSWKTReader_create()
def __del__(self):
"""Destroy WKT Reader"""
if self._lgeos is not None:
self._lgeos.GEOSWKTReader_destroy(self._reader)
self._reader = None
self._lgeos = None
def read(self, text):
"""Returns geometry from WKT"""
if not isinstance(text, str):
raise TypeError("Only str is accepted.")
text = text.encode()
c_string = c_char_p(text)
geom = self._lgeos.GEOSWKTReader_read(self._reader, c_string)
if not geom:
raise WKTReadingError(
"Could not create geometry because of errors "
"while reading input.")
# avoid circular import dependency
from shapely.geometry.base import geom_factory
return geom_factory(geom)
class WKTWriter:
_lgeos = None
_writer = None
# Establish default output settings
defaults = {
'trim': True,
'output_dimension': 3,
}
# GEOS' defaults for methods without "get"
_trim = False
_rounding_precision = -1
_old_3d = False
@property
def trim(self):
"""Trimming of unnecessary decimals (default: True)"""
return getattr(self, '_trim')
@trim.setter
def trim(self, value):
self._trim = bool(value)
self._lgeos.GEOSWKTWriter_setTrim(self._writer, self._trim)
@property
def rounding_precision(self):
"""Rounding precision when writing the WKT.
A precision of -1 (default) disables it."""
return getattr(self, '_rounding_precision')
@rounding_precision.setter
def rounding_precision(self, value):
self._rounding_precision = int(value)
self._lgeos.GEOSWKTWriter_setRoundingPrecision(
self._writer, self._rounding_precision)
@property
def output_dimension(self):
"""Output dimension, either 2 or 3 (default)"""
return self._lgeos.GEOSWKTWriter_getOutputDimension(
self._writer)
@output_dimension.setter
def output_dimension(self, value):
self._lgeos.GEOSWKTWriter_setOutputDimension(
self._writer, int(value))
@property
def old_3d(self):
"""Show older style for 3D WKT, without 'Z' (default: False)"""
return getattr(self, '_old_3d')
@old_3d.setter
def old_3d(self, value):
self._old_3d = bool(value)
self._lgeos.GEOSWKTWriter_setOld3D(self._writer, self._old_3d)
def __init__(self, lgeos, **settings):
"""Create WKT Writer
Note: older formatting before GEOS 3.3.0 can be achieved by setting
the properties:
trim = False
output_dimension = 2
"""
self._lgeos = lgeos
self._writer = self._lgeos.GEOSWKTWriter_create()
applied_settings = self.defaults.copy()
applied_settings.update(settings)
for name in applied_settings:
setattr(self, name, applied_settings[name])
def __setattr__(self, name, value):
"""Limit setting attributes"""
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError('%r object has no attribute %r' %
(self.__class__.__name__, name))
def __del__(self):
"""Destroy WKT Writer"""
if self._lgeos is not None:
self._lgeos.GEOSWKTWriter_destroy(self._writer)
self._writer = None
self._lgeos = None
def write(self, geom):
"""Returns WKT string for geometry"""
if geom is None or geom._geom is None:
raise InvalidGeometryError("Null geometry supports no operations")
result = self._lgeos.GEOSWKTWriter_write(self._writer, geom._geom)
text = string_at(result)
lgeos.GEOSFree(result)
return text.decode('ascii')
class WKBReader:
_lgeos = None
_reader = None
def __init__(self, lgeos):
"""Create WKB Reader"""
self._lgeos = lgeos
self._reader = self._lgeos.GEOSWKBReader_create()
def __del__(self):
"""Destroy WKB Reader"""
if self._lgeos is not None:
self._lgeos.GEOSWKBReader_destroy(self._reader)
self._reader = None
self._lgeos = None
def read(self, data):
"""Returns geometry from WKB"""
geom = self._lgeos.GEOSWKBReader_read(
self._reader, c_char_p(data), c_size_t(len(data)))
if not geom:
raise WKBReadingError(
"Could not create geometry because of errors "
"while reading input.")
# avoid circular import dependency
from shapely import geometry
return geometry.base.geom_factory(geom)
def read_hex(self, data):
"""Returns geometry from WKB hex"""
data = data.encode('ascii')
geom = self._lgeos.GEOSWKBReader_readHEX(
self._reader, c_char_p(data), c_size_t(len(data)))
if not geom:
raise WKBReadingError(
"Could not create geometry because of errors "
"while reading input.")
# avoid circular import dependency
from shapely import geometry
return geometry.base.geom_factory(geom)
class WKBWriter:
_lgeos = None
_writer = None
# EndianType enum in ByteOrderValues.h
_ENDIAN_BIG = 0
_ENDIAN_LITTLE = 1
# Establish default output setting
defaults = {'output_dimension': 3}
@property
def output_dimension(self):
"""Output dimension, either 2 or 3 (default)"""
return self._lgeos.GEOSWKBWriter_getOutputDimension(self._writer)
@output_dimension.setter
def output_dimension(self, value):
self._lgeos.GEOSWKBWriter_setOutputDimension(
self._writer, int(value))
@property
def big_endian(self):
"""Byte order is big endian, True (default) or False"""
return (self._lgeos.GEOSWKBWriter_getByteOrder(self._writer) ==
self._ENDIAN_BIG)
@big_endian.setter
def big_endian(self, value):
self._lgeos.GEOSWKBWriter_setByteOrder(
self._writer, self._ENDIAN_BIG if value else self._ENDIAN_LITTLE)
@property
def include_srid(self):
"""Include SRID, True or False (default)"""
return bool(self._lgeos.GEOSWKBWriter_getIncludeSRID(self._writer))
@include_srid.setter
def include_srid(self, value):
self._lgeos.GEOSWKBWriter_setIncludeSRID(self._writer, bool(value))
def __init__(self, lgeos, **settings):
"""Create WKB Writer"""
self._lgeos = lgeos
self._writer = self._lgeos.GEOSWKBWriter_create()
applied_settings = self.defaults.copy()
applied_settings.update(settings)
for name in applied_settings:
setattr(self, name, applied_settings[name])
def __setattr__(self, name, value):
"""Limit setting attributes"""
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError('%r object has no attribute %r' %
(self.__class__.__name__, name))
def __del__(self):
"""Destroy WKB Writer"""
if self._lgeos is not None:
self._lgeos.GEOSWKBWriter_destroy(self._writer)
self._writer = None
self._lgeos = None
def write(self, geom):
"""Returns WKB byte string for geometry"""
if geom is None or geom._geom is None:
raise InvalidGeometryError("Null geometry supports no operations")
size = c_size_t()
result = self._lgeos.GEOSWKBWriter_write(
self._writer, geom._geom, pointer(size))
data = string_at(result, size.value)
lgeos.GEOSFree(result)
return data
def write_hex(self, geom):
"""Returns WKB hex string for geometry"""
if geom is None or geom._geom is None:
raise InvalidGeometryError("Null geometry supports no operations")
size = c_size_t()
result = self._lgeos.GEOSWKBWriter_writeHEX(
self._writer, geom._geom, pointer(size))
data = string_at(result, size.value)
lgeos.GEOSFree(result)
return data.decode('ascii')
# Errcheck functions for ctypes
def errcheck_wkb(result, func, argtuple):
"""Returns bytes from a C pointer"""
if not result:
return None
size_ref = argtuple[-1]
size = size_ref.contents
retval = string_at(result, size.value)[:]
lgeos.GEOSFree(result)
return retval
def errcheck_just_free(result, func, argtuple):
"""Returns string from a C pointer"""
retval = string_at(result)
lgeos.GEOSFree(result)
return retval.decode('ascii')
def errcheck_null_exception(result, func, argtuple):
"""Wraps errcheck_just_free
Raises TopologicalError if result is NULL.
"""
if not result:
raise TopologicalError(
"The operation '{}' could not be performed."
"Likely cause is invalidity of the geometry.".format(
func.__name__))
return errcheck_just_free(result, func, argtuple)
def errcheck_predicate(result, func, argtuple):
"""Result is 2 on exception, 1 on True, 0 on False"""
if result == 2:
raise PredicateError("Failed to evaluate %s" % repr(func))
return result
class LGEOSBase(threading.local):
"""Proxy for GEOS C API
This is a base class. Do not instantiate.
"""
methods = {}
def __init__(self, dll):
self._lgeos = dll
self.geos_handle = None
def __del__(self):
"""Cleanup GEOS related processes"""
if self._lgeos is not None:
self._lgeos.finishGEOS()
self._lgeos = None
self.geos_handle = None
class LGEOS330(LGEOSBase):
"""Proxy for GEOS 3.3.0-CAPI-1.7.0
"""
geos_version = (3, 3, 0)
geos_capi_version = (1, 7, 0)
def __init__(self, dll):
super().__init__(dll)
self.geos_handle = self._lgeos.initGEOS_r(notice_h, error_h)
keys = list(self._lgeos.__dict__.keys())
for key in [x for x in keys if not x.endswith('_r')]:
if key + '_r' in keys:
reentr_func = getattr(self._lgeos, key + '_r')
attr = partial(reentr_func, self.geos_handle)
attr.__name__ = reentr_func.__name__
setattr(self, key, attr)
else:
setattr(self, key, getattr(self._lgeos, key))
# GEOS 3.3.8 from homebrew has, but doesn't advertise
# GEOSPolygonize_full. We patch it in explicitly here.
key = 'GEOSPolygonize_full'
func = getattr(self._lgeos, key + '_r')
attr = partial(func, self.geos_handle)
attr.__name__ = func.__name__
setattr(self, key, attr)
# Deprecated
self.GEOSGeomToWKB_buf.func.errcheck = errcheck_wkb
self.GEOSGeomToWKT.func.errcheck = errcheck_just_free
self.GEOSRelate.func.errcheck = errcheck_null_exception
for pred in (
self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSCovers,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSPreparedDisjoint,
self.GEOSPreparedTouches,
self.GEOSPreparedCrosses,
self.GEOSPreparedWithin,
self.GEOSPreparedOverlaps,
self.GEOSPreparedContains,
self.GEOSPreparedContainsProperly,
self.GEOSPreparedCovers,
self.GEOSPreparedIntersects,
self.GEOSRelatePattern,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSHasZ,
self.GEOSisClosed,
self.GEOSCoveredBy):
pred.func.errcheck = errcheck_predicate
self.GEOSisValidReason.func.errcheck = errcheck_just_free
self.methods['area'] = self.GEOSArea
self.methods['boundary'] = self.GEOSBoundary
self.methods['buffer'] = self.GEOSBuffer
self.methods['centroid'] = self.GEOSGetCentroid
self.methods['representative_point'] = self.GEOSPointOnSurface
self.methods['convex_hull'] = self.GEOSConvexHull
self.methods['distance'] = self.GEOSDistance
self.methods['envelope'] = self.GEOSEnvelope
self.methods['length'] = self.GEOSLength
self.methods['has_z'] = self.GEOSHasZ
self.methods['is_empty'] = self.GEOSisEmpty
self.methods['is_ring'] = self.GEOSisRing
self.methods['is_simple'] = self.GEOSisSimple
self.methods['is_valid'] = self.GEOSisValid
self.methods['disjoint'] = self.GEOSDisjoint
self.methods['touches'] = self.GEOSTouches
self.methods['intersects'] = self.GEOSIntersects
self.methods['crosses'] = self.GEOSCrosses
self.methods['within'] = self.GEOSWithin
self.methods['contains'] = self.GEOSContains
self.methods['overlaps'] = self.GEOSOverlaps
self.methods['covers'] = self.GEOSCovers
self.methods['equals'] = self.GEOSEquals
self.methods['equals_exact'] = self.GEOSEqualsExact
self.methods['relate'] = self.GEOSRelate
self.methods['difference'] = self.GEOSDifference
self.methods['symmetric_difference'] = self.GEOSSymDifference
self.methods['union'] = self.GEOSUnion
self.methods['intersection'] = self.GEOSIntersection
self.methods['prepared_disjoint'] = self.GEOSPreparedDisjoint
self.methods['prepared_touches'] = self.GEOSPreparedTouches
self.methods['prepared_intersects'] = self.GEOSPreparedIntersects
self.methods['prepared_crosses'] = self.GEOSPreparedCrosses
self.methods['prepared_within'] = self.GEOSPreparedWithin
self.methods['prepared_contains'] = self.GEOSPreparedContains
self.methods['prepared_contains_properly'] = \
self.GEOSPreparedContainsProperly
self.methods['prepared_overlaps'] = self.GEOSPreparedOverlaps
self.methods['prepared_covers'] = self.GEOSPreparedCovers
self.methods['relate_pattern'] = self.GEOSRelatePattern
self.methods['simplify'] = self.GEOSSimplify
self.methods['topology_preserve_simplify'] = \
self.GEOSTopologyPreserveSimplify
self.methods['normalize'] = self.GEOSNormalize
self.methods['cascaded_union'] = self.GEOSUnionCascaded
def parallel_offset(geom, distance, resolution=16, join_style=1,
mitre_limit=5.0, side='right'):
if side == 'right':
distance *= -1
return self.GEOSOffsetCurve(
geom, distance, resolution, join_style, mitre_limit)
self.methods['parallel_offset'] = parallel_offset
self.methods['project'] = self.GEOSProject
self.methods['project_normalized'] = self.GEOSProjectNormalized
self.methods['interpolate'] = self.GEOSInterpolate
self.methods['interpolate_normalized'] = \
self.GEOSInterpolateNormalized
self.methods['buffer_with_style'] = self.GEOSBufferWithStyle
self.methods['hausdorff_distance'] = self.GEOSHausdorffDistance
self.methods['unary_union'] = self.GEOSUnaryUnion
self.methods['cascaded_union'] = self.methods['unary_union']
self.methods['is_closed'] = self.GEOSisClosed
self.methods['snap'] = self.GEOSSnap
self.methods['shared_paths'] = self.GEOSSharedPaths
self.methods['buffer_with_params'] = self.GEOSBufferWithParams
self.methods['covered_by'] = self.GEOSCoveredBy
class LGEOS340(LGEOS330):
"""Proxy for GEOS 3.4.0-CAPI-1.8.0
"""
geos_version = (3, 4, 0)
geos_capi_version = (1, 8, 0)
def __init__(self, dll):
super().__init__(dll)
self.methods['delaunay_triangulation'] = self.GEOSDelaunayTriangulation
self.methods['nearest_points'] = self.GEOSNearestPoints
class LGEOS350(LGEOS340):
"""Proxy for GEOS 3.5.0-CAPI-1.9.0
"""
geos_version = (3, 5, 0)
geos_capi_version = (1, 9, 0)
def __init__(self, dll):
super().__init__(dll)
self.methods['clip_by_rect'] = self.GEOSClipByRect
self.methods['voronoi_diagram'] = self.GEOSVoronoiDiagram
class LGEOS360(LGEOS350):
"""Proxy for GEOS 3.6.0-CAPI-1.10.0
"""
geos_version = (3, 6, 0)
geos_capi_version = (1, 10, 0)
def __init__(self, dll):
super().__init__(dll)
self.methods['minimum_clearance'] = self.GEOSMinimumClearance
class LGEOS380(LGEOS360):
"""Proxy for GEOS 3.8.0-CAPI-1.13.0"""
geos_version = (3, 8, 0)
geos_capi_version = (1, 13, 0)
def __init__(self, dll):
super().__init__(dll)
self.methods['make_valid'] = self.GEOSMakeValid
if geos_version >= (3, 8, 0):
L = LGEOS380
elif geos_version >= (3, 6, 0):
L = LGEOS360
elif geos_version >= (3, 5, 0):
L = LGEOS350
elif geos_version >= (3, 4, 0):
L = LGEOS340
elif geos_version >= (3, 3, 0):
L = LGEOS330
else:
raise ValueError('unexpected geos_version: ' + str(geos_version))
lgeos = L(_lgeos)
def cleanup(proxy):
del proxy
atexit.register(cleanup, lgeos)
|
[] |
[] |
[
"RESOURCEPATH",
"PATH"
] |
[]
|
["RESOURCEPATH", "PATH"]
|
python
| 2 | 0 | |
cmd/session.go
|
package envy
import (
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"strings"
"syscall"
"time"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh"
)
func init() {
cmdSession.AddCommand(cmdSessionReload)
cmdSession.AddCommand(cmdSessionCommit)
cmdSession.AddCommand(cmdSessionSwitch)
Cmd.AddCommand(cmdSession)
}
var cmdSession = &cobra.Command{
Use: "session",
Run: func(cmd *cobra.Command, args []string) {
cmd.Usage()
},
}
var cmdSessionReload = &cobra.Command{
Short: "reload session from environment image",
Long: `Reload recreates the current session container from the environment image.`,
Use: "reload",
Run: func(cmd *cobra.Command, args []string) {
session := GetSession(os.Getenv("ENVY_USER"), os.Getenv("ENVY_SESSION"))
log.Println(session.User.Name, "| reloading session", session.Name)
os.Exit(128)
},
}
var cmdSessionCommit = &cobra.Command{
Short: "commit session changes to environment image",
Long: `Commit saves changes made in the session to the environment image.`,
Use: "commit [<environ>]",
Run: func(cmd *cobra.Command, args []string) {
session := GetSession(os.Getenv("ENVY_USER"), os.Getenv("ENVY_SESSION"))
var environ *Environ
if len(args) > 0 {
environ = GetEnviron(os.Getenv("ENVY_USER"), args[0])
} else {
environ = session.Environ()
}
log.Println(session.User.Name, "| committing session", session.Name, "to", environ.Name)
fmt.Fprintf(os.Stdout, "Committing to %s ...\n", environ.Name)
dockerCommit(session.DockerName(), environ.DockerImage())
os.Exit(128)
},
}
var cmdSessionSwitch = &cobra.Command{
Short: "switch session to different environment",
Long: `Switch reloads session from a new environment image.`,
Use: "switch <environ>",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
return
}
session := GetSession(os.Getenv("ENVY_USER"), os.Getenv("ENVY_SESSION"))
session.SetEnviron(args[0])
log.Println(session.User.Name, "| switching session", session.Name, "to", args[0])
os.Exit(128)
},
}
type Session struct {
User *User
Name string
}
func (s *Session) Environ() *Environ {
return GetEnviron(s.User.Name, readFile(s.Path("environ")))
}
func (s *Session) SetEnviron(name string) {
writeFile(s.Path("environ"), name)
}
func (s *Session) Path(parts ...string) string {
return Envy.Path(append([]string{"users", s.User.Name, "sessions", s.Name}, parts...)...)
}
func (s *Session) DockerName() string {
return s.Name
}
func (s *Session) Enter(environ *Environ) int {
defer s.Cleanup()
log.Println(s.User.Name, "| entering session", s.Name)
os.Setenv("ENVY_USER", s.User.Name)
os.Setenv("ENVY_SESSION", s.Name)
s.SetEnviron(environ.Name)
fmt.Fprintln(os.Stdout, "Entering session...")
envySock := startSessionServer(s.Path("run/envy.sock"))
defer envySock.Close()
for {
dockerRemove(s.Name)
environ := s.Environ()
args := []string{"run", "-it",
fmt.Sprintf("--name=%s", s.Name),
fmt.Sprintf("--net=container:%s", environ.DockerName()),
fmt.Sprintf("--env=HOSTNAME=%s", environ.Name),
fmt.Sprintf("--env=ENVY_RELOAD=%v", int32(time.Now().Unix())),
fmt.Sprintf("--env=ENVY_SESSION=%s", s.Name),
fmt.Sprintf("--env=ENVY_USER=%s", s.User.Name),
"--env=DOCKER_HOST=unix:///var/run/docker.sock",
"--env=ENV=/etc/envyrc",
fmt.Sprintf("--volume=%s:/var/run/docker.sock", Envy.HostPath(environ.Path("run/docker.sock"))),
fmt.Sprintf("--volume=%s:/var/run/envy.sock:ro", Envy.HostPath(s.Path("run/envy.sock"))),
fmt.Sprintf("--volume=%s:/etc/envyrc:ro", Envy.HostPath(environ.Path("envyrc"))),
fmt.Sprintf("--volume=%s:/root/environ", Envy.HostPath(environ.Path())),
fmt.Sprintf("--volume=%s:/root", Envy.HostPath(s.User.Path("root"))),
fmt.Sprintf("--volume=%s:/home/%s", Envy.HostPath(s.User.Path("home")), s.User.Name),
fmt.Sprintf("--volume=%s:/sbin/envy:ro", Envy.HostPath("bin/envy")),
}
if s.User.Admin() {
args = append(args, fmt.Sprintf("--volume=%s:/envy", Envy.HostPath()))
}
args = append(args, environ.DockerImage())
if dockerShellCmd(environ.DockerImage()) != nil {
args = append(args, dockerShellCmd(environ.DockerImage())...)
}
status := run(exec.Command("/bin/docker", args...))
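// Exit status 128 is the sentinel used by the reload, commit and switch
// subcommands to request that the session container be recreated; any
// other status ends the session.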
if status != 128 {
return status
}
}
}
func (s *Session) Cleanup() {
log.Println("Cleaning up")
dockerRemove(s.Name)
os.Remove(s.Path("run/envy.sock"))
}
func NewSession(user string) *Session {
return GetSession(user, nextSessionName(GetUser(user)))
}
func GetSession(user, name string) *Session {
u := GetUser(user)
s := &Session{
Name: name,
User: u,
}
mkdirAll(s.Path("run"))
return s
}
func nextSessionName(user *User) string {
n := 0
// TODO: panic on max n
// TODO: clean up sessions without docker running
for {
s := user.Session(fmt.Sprintf("%s.%v", user.Name, n))
if !exists(s.Path()) {
return s.Name
}
n += 1
}
}
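// startSessionServer listens on a unix socket in the session directory and
// serves envy subcommands to the container over SSH.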
func startSessionServer(path string) net.Listener {
os.Remove(path)
ln, err := net.Listen("unix", path)
assert(err)
go func() {
for {
conn, err := ln.Accept()
if err != nil {
break
}
go handleSSHConn(conn)
}
}()
return ln
}
func handleSSHConn(conn net.Conn) {
defer conn.Close()
config := &ssh.ServerConfig{NoClientAuth: true}
privateBytes, err := ioutil.ReadFile(Envy.DataPath("id_host"))
assert(err)
private, err := ssh.ParsePrivateKey(privateBytes)
assert(err)
config.AddHostKey(private)
_, chans, reqs, err := ssh.NewServerConn(conn, config)
if err != nil {
log.Println(err)
return
}
go ssh.DiscardRequests(reqs)
for ch := range chans {
if ch.ChannelType() != "session" {
ch.Reject(ssh.UnknownChannelType, "unsupported channel type")
continue
}
go handleSSHChannel(ch)
}
}
func handleSSHChannel(newChan ssh.NewChannel) {
ch, reqs, err := newChan.Accept()
if err != nil {
log.Println("handle channel failed:", err)
return
}
for req := range reqs {
go func(req *ssh.Request) {
if req.WantReply {
req.Reply(true, nil)
}
switch req.Type {
case "exec":
defer ch.Close()
var payload = struct{ Value string }{}
ssh.Unmarshal(req.Payload, &payload)
line := strings.Trim(payload.Value, "\n")
var args []string
if line != "" {
args = strings.Split(line, " ")
}
cmd := exec.Command("/bin/envy", args...)
cmd.Stdout = ch
cmd.Stderr = ch.Stderr()
err := cmd.Run()
status := struct{ Status uint32 }{0}
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
if stat, ok := exiterr.Sys().(syscall.WaitStatus); ok {
status = struct{ Status uint32 }{uint32(stat.ExitStatus())}
} else {
assert(err)
}
}
}
_, err = ch.SendRequest("exit-status", false, ssh.Marshal(&status))
assert(err)
return
}
}(req)
}
}
|
[
"\"ENVY_USER\"",
"\"ENVY_SESSION\"",
"\"ENVY_USER\"",
"\"ENVY_SESSION\"",
"\"ENVY_USER\"",
"\"ENVY_USER\"",
"\"ENVY_SESSION\""
] |
[] |
[
"ENVY_SESSION",
"ENVY_USER"
] |
[]
|
["ENVY_SESSION", "ENVY_USER"]
|
go
| 2 | 0 | |
trac/tests/functional/svntestenv.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import os
import re
from subprocess import call
from testenv import FunctionalTestEnvironment
from trac.util.compat import close_fds
class SvnFunctionalTestEnvironment(FunctionalTestEnvironment):
def work_dir(self):
return os.path.join(self.dirname, 'workdir')
def repo_path(self, filename):
return os.path.join(self.dirname, filename)
def repo_path_for_initenv(self):
return self.repo_path('repo')
def create_repo(self):
"""
Initialize a repo of the type :attr:`self.repotype`.
"""
self.svnadmin_create()
if call(['svn', 'co', self.repo_url(), self.work_dir()],
stdout=self.logfile, stderr=self.logfile,
close_fds=close_fds):
raise Exception('Checkout from %s failed.' % self.repo_url())
def destroy_repo(self):
"""The deletion of the test environment will remove the
repo as well."""
pass
def post_create(self, env):
"""Hook for modifying the environment after creation."""
self._tracadmin('config', 'set', 'repositories',
'.sync_per_request', '1')
def repo_url(self):
"""Returns the url of the Subversion repository for this test
environment.
"""
repodir = self.repo_path_for_initenv()
if os.name == 'nt':
return 'file:///' + repodir.replace("\\", "/")
else:
return 'file://' + repodir
def svnadmin_create(self, filename=None):
"""Subversion helper to create a new repository."""
if filename is None:
path = self.repo_path_for_initenv()
else:
path = self.repo_path(filename)
if call(["svnadmin", "create", path],
stdout=self.logfile, stderr=self.logfile, close_fds=close_fds):
raise Exception('unable to create subversion repository: %r' %
path)
return path
def svn_mkdir(self, paths, msg, username='admin'):
"""Subversion helper to create a new directory within the main
repository. Operates directly on the repository url, so a working
copy need not exist.
Example::
self._testenv.svn_mkdir(["abc", "def"], "Add dirs")
"""
self.call_in_workdir(['svn', '--username=%s' % username,
'mkdir', '-m', msg]
+ [self.repo_url() + '/' + d for d in paths])
self.call_in_workdir(['svn', 'update'])
def svn_add(self, filename, data, msg=None, username='admin'):
"""Subversion helper to add a file to the given path within the main
repository.
Example::
self._testenv.svn_add("root.txt", "Hello World")
"""
with open(os.path.join(self.work_dir(), filename), 'w') as f:
f.write(data)
self.call_in_workdir(['svn', 'add', filename])
environ = os.environ.copy()
environ['LC_ALL'] = 'C' # Force English messages in svn
msg = 'Add %s' % filename if msg is None else msg
output = self.call_in_workdir(['svn', '--username=%s' % username,
'commit', '-m', msg, filename],
environ=environ)
try:
revision = re.search(r'Committed revision ([0-9]+)\.',
output).group(1)
except Exception as e:
args = e.args + (output, )
raise Exception(*args)
return int(revision)
def call_in_workdir(self, args, environ=None):
return self.call_in_dir(self.work_dir(), args, environ)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/main/java/com/oci/caas/pciecommerce/rest/TestRestController.java
|
package com.oci.caas.pciecommerce.rest;
import com.oci.caas.pciecommerce.model.Person;
import com.stripe.Stripe;
import com.stripe.exception.StripeException;
import com.stripe.model.PaymentIntent;
import com.stripe.param.PaymentIntentCreateParams;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.*;
import java.util.ArrayList;
/**
* Handler used for testing of templating (through /greeting)
* and stripe api (through /payment)
*/
@Controller
public class TestRestController {
/**
* Request body for creating a payment intent: the ordered items and the currency.
*/
static class CreatePaymentBody {
private Object[] items;
private String currency;
public Object[] getItems() {
return items;
}
public String getCurrency() {
return currency;
}
}
/**
* Response returned to the client: the Stripe publishable key and the PaymentIntent client secret.
*/
static class CreatePaymentResponse {
private String publishableKey;
private String clientSecret;
public CreatePaymentResponse(String publishableKey, String clientSecret) {
this.publishableKey = publishableKey;
this.clientSecret = clientSecret;
}
public String getClientSecret() {
return clientSecret;
}
public void setClientSecret(String clientSecret) {
this.clientSecret = clientSecret;
}
public String getPublishableKey() {
return publishableKey;
}
public void setPublishableKey(String publishableKey) {
this.publishableKey = publishableKey;
}
}
/**
* Computes the amount to charge for the given items.
* @param items items in the order, as sent by the client
* @return the order amount in the smallest currency unit
*/
static int calculateOrderAmount(Object[] items) {
// Replace this constant with a calculation of the order's amount
// Calculate the order total on the server to prevent
// users from directly manipulating the amount on the client
return 1000;
}
@Autowired
JdbcTemplate jdbcTemplate;
/**
* Renders the greeting page with the persons read from the database.
* @param name name to greet, defaults to "World"
* @param model view model populated with the name and the persons list
* @return the name of the greeting template
*/
@GetMapping("/greeting")
public String greeting(@RequestParam(name = "name", required = false, defaultValue = "World") String name, Model model) {
String query = "Select personid, firstname, lastname from persons";
ArrayList<Person> personArrayList = new ArrayList<>();
jdbcTemplate.query(
query,
(rs, rowNum) -> new Person(rs.getLong("PERSONID"), rs.getString("FIRSTNAME"), rs.getString("LASTNAME"))
).forEach(person -> {
personArrayList.add(person);
System.out.println(person.toString());});
model.addAttribute("name", name);
model.addAttribute("persons", personArrayList);
return "greeting";
}
/**
* Creates a Stripe PaymentIntent for the posted order.
* @param postBody the items and currency posted by the client
* @return the publishable key and the PaymentIntent client secret
* @throws StripeException
*/
@PostMapping(value = "/create-payment-intent", produces = "application/json")
@ResponseBody
public CreatePaymentResponse secret(@RequestBody CreatePaymentBody postBody) throws StripeException {
String private_key = System.getenv("STRIPE_SECRET_KEY");
String public_key = System.getenv("STRIPE_PUBLISHABLE_KEY");
Stripe.apiKey = private_key;
PaymentIntentCreateParams createParams = new PaymentIntentCreateParams.Builder()
.setCurrency("usd")
.setAmount(new Long(calculateOrderAmount(postBody.getItems())))
.build();
// Create a PaymentIntent with the order amount and currency
PaymentIntent intent = PaymentIntent.create(createParams);
// Send publishable key and PaymentIntent details to client
CreatePaymentResponse paymentResponse = new CreatePaymentResponse(public_key, intent.getClientSecret());
return paymentResponse;
}
}
|
[
"\"STRIPE_SECRET_KEY\"",
"\"STRIPE_PUBLISHABLE_KEY\""
] |
[] |
[
"STRIPE_SECRET_KEY",
"STRIPE_PUBLISHABLE_KEY"
] |
[]
|
["STRIPE_SECRET_KEY", "STRIPE_PUBLISHABLE_KEY"]
|
java
| 2 | 0 | |
python/oracle.py
|
#!/usr/bin/python
"""
Test oracle client
Author: John Newby
Copyright (c) 2018 aeternity developers
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import asyncio
from epoch import Epoch
import json
import os
from websocket import create_connection
class Oracle:
def __init__(self):
self.pub_key = os.environ['AE_PUB_KEY']
self.url = "ws://localhost:" + os.environ['AE_WEBSOCKET'] + "/websocket"
self.websocket = None
self.local_port = os.environ['AE_LOCAL_PORT']
self.local_internal_port = os.environ['AE_LOCAL_INTERNAL_PORT']
self.epoch = Epoch()
def connect_websocket(self):
if not self.websocket:
self.websocket = create_connection(self.url)
def register(self, query_format, response_format, query_fee, ttl, fee):
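# Build and send an OracleRegisterTx over the node websocket; returns the new oracle_id.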
self.connect_websocket()
query = { "target": "oracle",
"action": "register",
"payload": { "type": "OracleRegisterTxObject",
"vsn": 1,
"account": self.pub_key,
"query_format": query_format,
"response_format": response_format,
"query_fee": int(query_fee),
"ttl": {"type": "delta",
"value": int(ttl)},
"fee": int(fee) } }
j = json.dumps(query)
print(j)
self.epoch.update_top_block()
self.websocket.send(j)
response = json.loads(self.websocket.recv())
if not response['payload']['result'] == "ok":
raise RuntimeError(response)
oracle_id = response['payload']['oracle_id']
self.epoch.wait_for_block()
return oracle_id
def wait_for_block(self):
self.epoch.update_top_block()
self.epoch.wait_for_block()
def subscribe(self, oracle_id, callback = None):
self.connect_websocket()
query = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "query",
"oracle_id": oracle_id }}
j = json.dumps(query)
self.websocket.send(j)
while True:
response = json.loads(self.websocket.recv())
print(response)
if response['action'] == 'mined_block':
continue
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
id = response['payload']['subscribed_to']['oracle_id']
break
mining_events = 0
while True:
data = self.websocket.recv()
j = json.loads(data)
print(j)
if j['action'] == 'mined_block':
mining_events += 1
continue
if j['action'] == 'new_oracle_query':
if callback:
callback(j)
else:
print("Unhandled")
if mining_events == 0:
self.epoch.wait_for_block()
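# Send an OracleQueryTx to an existing oracle; returns the query_id on success, False otherwise.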
def query(self, oracle_pubkey, query_fee, query_ttl, response_ttl,
fee, query):
self.connect_websocket()
request = {"target": "oracle",
"action": "query",
"payload": {"type": "OracleQueryTxObject",
"vsn": 1,
"oracle_pubkey": oracle_pubkey,
"query_fee": int(query_fee),
"query_ttl": {"type": "delta",
"value": int(query_ttl)},
"response_ttl": {"type": "delta",
"value": int(response_ttl)},
"fee": int(fee),
"query": query }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
response = self.websocket.recv()
print(response)
response = json.loads(response)
if response['payload']['result'] == "ok":
return response['payload']['query_id']
self.epoch.wait_for_block()
return False
def subscribe_query(self, query_id, callback = None):
self.connect_websocket()
request = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "response",
"query_id": query_id }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
# check response, might have to consume a block mined message
while True:
blocks_mined = 0
response = self.websocket.recv()
response = json.loads(response)
print(response)
if response['action'] == 'mined_block':
blocks_mined += 1
continue
if response['action'] == 'new_oracle_response':
if callback:
callback(response['payload'])
else:
print(response['payload'])
break
# Should we get here?
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
def respond(self, query_id, fee, reply):
self.connect_websocket()
response = {"target": "oracle",
"action": "response",
"payload": {"type": "OracleResponseTxObject",
"vsn": 1,
"query_id": query_id,
"fee": int(fee),
"response": reply}}
response = json.dumps(response)
print(response)
self.websocket.send(response)
|
[] |
[] |
[
"AE_LOCAL_PORT",
"AE_WEBSOCKET",
"AE_PUB_KEY",
"AE_LOCAL_INTERNAL_PORT"
] |
[]
|
["AE_LOCAL_PORT", "AE_WEBSOCKET", "AE_PUB_KEY", "AE_LOCAL_INTERNAL_PORT"]
|
python
| 4 | 0 | |
function-logs/handler.go
|
package function
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/openfaas/openfaas-cloud/sdk"
)
// Handle grabs the logs for the fn that is named in the input
func Handle(req []byte) string {
user := string(req)
var function string
if len(user) == 0 {
if query, exists := os.LookupEnv("Http_Query"); exists {
vals, _ := url.ParseQuery(query)
userQuery := vals.Get("user")
function = vals.Get("function")
if len(userQuery) > 0 {
user = userQuery
}
}
}
if len(user) == 0 {
log.Fatalf("User is required as POST or querystring i.e. ?user=alexellis.")
}
gatewayURL := os.Getenv("gateway_url")
allowed, err := isUserFunction(function, gatewayURL, user)
if err != nil {
log.Fatalf("there was an error requesting the function %q", function)
}
if !allowed {
log.Fatalf("requested function %q could not be found or you are not allowed to access it", function)
}
formattedLogs, fmtErr := getFormattedLogs(gatewayURL, function)
if fmtErr != nil {
log.Fatalf("there was an error formatting logs for the function %q, %s", function, fmtErr)
}
return formattedLogs
}
func getFormattedLogs(gatewayURL string, function string) (string, error) {
if len(function) == 0 {
return "", errors.New("function name was empty, please provide a valid function name")
}
queryParams := make(map[string]string)
queryParams["name"] = function
queryParams["follow"] = "false"
queryParams["since"] = time.Now().Add(-1 * time.Minute * 30).Format(time.RFC3339)
response, bodyBytes := makeGatewayHttpReq(gatewayURL+"/system/logs", queryParams)
if response.StatusCode != http.StatusOK {
return "", errors.New(fmt.Sprintf("unable to query logs, status: %d, message: %s", response.StatusCode, string(bodyBytes)))
}
formattedLogs, formatErr := formatLogs(bodyBytes)
if formatErr != nil {
return "", formatErr
}
return formattedLogs, nil
}
func isUserFunction(function string, gatewayURL string, user string) (bool, error) {
queryParams := make(map[string]string)
queryParams["user"] = user
if len(user) == 0 {
return false, errors.New("user is not set, user must be set for us to find logs")
}
response, bodyBytes := makeGatewayHttpReq(gatewayURL+"/function/list-functions", queryParams)
if response.StatusCode != http.StatusOK {
return false, fmt.Errorf("unable to query functions list, status: %d, message: %s", response.StatusCode, string(bodyBytes))
}
res, err := functionInResponse(bodyBytes, function, user)
if err != nil {
return false, err
}
return res, nil
}
func formatLogs(msgBody []byte) (string, error) {
if len(msgBody) == 0 {
return "", nil
}
var b strings.Builder
for _, line := range strings.Split(strings.TrimSuffix(string(msgBody), "\n"), "\n") {
data := Message{}
if err := json.Unmarshal([]byte(line), &data); err != nil {
return "", err
}
b.WriteString(data.Text)
}
return strings.TrimRight(b.String(), "\n"), nil
}
func functionInResponse(bodyBytes []byte, function string, owner string) (bool, error) {
functions := []sdk.Function{}
mErr := json.Unmarshal(bodyBytes, &functions)
if mErr != nil {
return false, mErr
}
for _, fn := range functions {
if fn.Name == function {
return fn.Labels["com.openfaas.cloud.git-owner"] == owner, nil
}
}
return false, nil
}
func makeGatewayHttpReq(URL string, queryParams map[string]string) (*http.Response, []byte) {
c := http.Client{
Timeout: time.Second * 3,
}
httpReq, _ := http.NewRequest(http.MethodGet, URL, nil)
query := url.Values{}
for key, value := range queryParams {
query.Add(key, value)
}
addAuthErr := sdk.AddBasicAuth(httpReq)
if addAuthErr != nil {
log.Fatalf("Basic auth error %s", addAuthErr)
}
httpReq.URL.RawQuery = query.Encode()
response, err := c.Do(httpReq)
if err != nil {
log.Fatal(err)
}
defer response.Body.Close()
bodyBytes, bErr := ioutil.ReadAll(response.Body)
if bErr != nil {
log.Fatal(bErr)
}
return response, bodyBytes
}
type Message struct {
Name string `json:"name"`
Instance string `json:"instance"`
Timestamp time.Time `json:"timestamp"`
Text string `json:"text"`
}
|
[
"\"gateway_url\""
] |
[] |
[
"gateway_url"
] |
[]
|
["gateway_url"]
|
go
| 1 | 0 | |
server.go
|
package main
import (
"context"
"net/http"
"os"
"time"
"github.com/byuoitav/central-event-system/hub/base"
"github.com/byuoitav/central-event-system/messenger"
"github.com/byuoitav/common"
"github.com/byuoitav/common/log"
"github.com/byuoitav/common/nerr"
"github.com/byuoitav/common/v2/auth"
"github.com/byuoitav/common/v2/events"
"github.com/byuoitav/shipwright/actions"
"github.com/byuoitav/shipwright/alertstore"
"github.com/byuoitav/shipwright/couch"
"github.com/byuoitav/shipwright/state/roomsync"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
// imported to initialize the list of then's
_ "github.com/byuoitav/shipwright/actions/then/circular"
"github.com/byuoitav/shipwright/handlers"
"github.com/byuoitav/shipwright/socket"
figure "github.com/common-nighthawk/go-figure"
)
func init() {
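// Unless NO_PULL is set, refresh shipwright's configuration documents from couch before starting.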
if os.Getenv("NO_PULL") == "" {
err := resetConfig(context.Background())
if err != nil {
log.L.Fatalf(err.Error())
}
}
}
func main() {
log.SetLevel("info")
figure.NewFigure("SMEE", "univers", true).Print()
port := ":9999"
router := common.NewRouter()
go actions.DefaultActionManager().Start(context.TODO())
alertstore.InitializeAlertStore(actions.DefaultActionManager())
go roomsync.StartRoomSync(24*time.Hour, context.Background())
// connect to the hub
messenger, err := messenger.BuildMessenger(os.Getenv("HUB_ADDRESS"), base.Messenger, 5000)
if err != nil {
log.L.Fatalf("failed to build messenger: %s", err)
}
// get events from the hub
go func() {
messenger.SubscribeToRooms("*")
for {
processEvent(messenger.ReceiveEvent())
}
}()
// get events from external sources
router.POST("/event", func(ctx echo.Context) error {
e := events.Event{}
err := ctx.Bind(&e)
if err != nil {
return ctx.String(http.StatusBadRequest, err.Error())
}
processEvent(e)
return ctx.String(http.StatusOK, "processing event")
})
writeconfig := router.Group(
"",
auth.CheckHeaderBasedAuth,
// echo.WrapMiddleware(auth.AuthenticateCASUser),
auth.AuthorizeRequest("write-config", "configuration", func(c echo.Context) string { return "all" }),
)
readconfig := router.Group(
"",
auth.CheckHeaderBasedAuth,
// echo.WrapMiddleware(auth.AuthenticateCASUser),
auth.AuthorizeRequest("read-config", "configuration", func(c echo.Context) string { return "all" }),
)
/*
writestate := router.Group(
"",
auth.CheckHeaderBasedAuth,
echo.WrapMiddleware(auth.AuthenticateCASUser),
auth.AuthorizeRequest("write-state", "configuration", func(c echo.Context) string { return "all" }),
)
readstate := router.Group(
"",
auth.CheckHeaderBasedAuth,
echo.WrapMiddleware(auth.AuthenticateCASUser),
auth.AuthorizeRequest("read-state", "configuration", func(c echo.Context) string { return "all" }),
)
*/
//Screenshots
router.POST("/screenshot", handlers.GetScreenshot)
router.GET("/actions", actions.DefaultActionManager().Info)
router.GET("/actions/trigger/:trigger", actions.DefaultActionManager().Config.ActionsByTrigger)
// Building Endpoints
writeconfig.POST("/buildings/:building", handlers.AddBuilding)
writeconfig.POST("/buildings", handlers.AddMultipleBuildings)
readconfig.GET("/buildings/:building", handlers.GetBuilding)
readconfig.GET("/buildings", handlers.GetAllBuildings)
writeconfig.PUT("/buildings/:building/update", handlers.UpdateBuilding)
writeconfig.PUT("/buildings/update", handlers.UpdateMultipleBuildings)
writeconfig.GET("/buildings/:building/delete", handlers.DeleteBuilding)
// Room Endpoints
writeconfig.POST("/rooms/:room", handlers.AddRoom)
writeconfig.POST("/rooms", handlers.AddMultipleRooms)
readconfig.GET("/rooms/:room", handlers.GetRoom)
readconfig.GET("/rooms", handlers.GetAllRooms)
readconfig.GET("/buildings/:building/rooms", handlers.GetRoomsByBuilding)
writeconfig.PUT("/rooms/:room/update", handlers.UpdateRoom)
writeconfig.PUT("/rooms/update", handlers.UpdateMultipleRooms)
writeconfig.GET("/rooms/:room/delete", handlers.DeleteRoom)
readconfig.GET("/rooms/configurations", handlers.GetRoomConfigurations)
readconfig.GET("/rooms/designations", handlers.GetRoomDesignations)
readconfig.GET("/rooms/:roomID/schedule", handlers.GetRoomClassSchedule)
readconfig.GET("/rooms/:roomID/attachments", handlers.GetRoomAttachments)
writeconfig.DELETE("/rooms/:roomID/nuke", handlers.NukeRoom)
// Device Endpoints
writeconfig.POST("/devices/:device", handlers.AddDevice)
writeconfig.POST("/devices", handlers.AddMultipleDevices)
readconfig.GET("/devices/:device", handlers.GetDevice)
readconfig.GET("/devices", handlers.GetAllDevices)
readconfig.GET("/rooms/:room/devices", handlers.GetDevicesByRoom)
readconfig.GET("/rooms/:room/devices/roles/:role", handlers.GetDevicesByRoomAndRole)
readconfig.GET("/devices/types/:type/roles/:role", handlers.GetDevicesByTypeAndRole)
writeconfig.PUT("/devices/:device/update", handlers.UpdateDevice)
writeconfig.PUT("/devices/update", handlers.UpdateMultipleDevices)
writeconfig.GET("/devices/:device/delete", handlers.DeleteDevice)
readconfig.GET("/devices/types", handlers.GetDeviceTypes)
readconfig.GET("/devices/roles", handlers.GetDeviceRoles)
readconfig.GET("/devices/:hostname/address", handlers.GetDeviceRawIPAddress)
// UIConfig Endpoints
writeconfig.POST("/uiconfigs/:config", handlers.AddUIConfig)
writeconfig.POST("/uiconfigs", handlers.AddMultipleUIConfigs)
readconfig.GET("/uiconfigs/:config", handlers.GetUIConfig)
readconfig.GET("/uiconfigs", handlers.GetAllUIConfigs)
writeconfig.PUT("/uiconfigs/:config/update", handlers.UpdateUIConfig)
writeconfig.PUT("/uiconfigs/update", handlers.UpdateMultipleUIConfigs)
writeconfig.GET("/uiconfigs/:config/delete", handlers.DeleteUIConfig)
// Options Endpoints
readconfig.GET("/options/icons", handlers.GetIcons)
readconfig.GET("/options/templates", handlers.GetTemplates)
readconfig.GET("/options/menutree", handlers.GetMenuTree)
// Attributes Endpoints
readconfig.GET("/attributes", handlers.GetAllAttributeGroups)
readconfig.GET("/attributes/:groupID", handlers.GetAttributeGroup)
// Auth Endpoints
readconfig.GET("/users/current/username", handlers.GetUsername)
readconfig.GET("/users/current/permissions", handlers.GetUserPermissions)
// Static Record Endpoints
readconfig.GET("/static/devices", handlers.GetAllStaticDeviceRecords)
readconfig.GET("/static/devices/:device", handlers.GetStaticDeviceRecord)
readconfig.GET("/static/rooms", handlers.GetAllStaticRoomRecords)
readconfig.GET("/static/rooms/state", handlers.GetAllRoomCombinedStateRecords)
readconfig.GET("/static/rooms/:room/state", handlers.GetRoomCombinedStateRecord)
readconfig.PUT("/static/rooms/:room/maintenance", handlers.UpdateStaticRoom)
// Alert Endpoints
readconfig.GET("/issues", handlers.GetAllRoomIssues)
readconfig.PUT("/issues", handlers.UpdateRoomIssue)
readconfig.PUT("/issues/:issueID/resolve", handlers.ResolveIssue)
readconfig.GET("/issues/:issueID", handlers.GetRoomIssue)
readconfig.GET("/issues/resolutions", handlers.GetClosureCodes)
readconfig.GET("/issues/queue", handlers.GetAlertStoreQueueStatus)
writeconfig.PUT("/alerts/add", handlers.AddAlert)
readconfig.GET("/alerts/responders", handlers.GetResponders)
// Websocket Endpoints
router.GET("/ws", socket.UpgradeToWebsocket(socket.GetManager()))
router.Use(auth.CheckHeaderBasedAuth,
auth.CheckHeaderBasedAuth,
// echo.WrapMiddleware(auth.AuthenticateCASUser),
auth.AuthorizeRequest("read-config", "configuration", func(c echo.Context) string { return "all" }),
middleware.StaticWithConfig(middleware.StaticConfig{
Root: "web-dist",
Index: "index.html",
HTML5: true,
Browse: true,
}))
server := http.Server{
Addr: port,
MaxHeaderBytes: 1024 * 10,
}
router.StartServer(&server)
}
func processEvent(event events.Event) {
actions.DefaultActionManager().EventStream <- event
}
func resetConfig(actionManagerCtx context.Context) *nerr.E {
log.L.Infof("Reseting config for shipwright")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := couch.UpdateConfigFiles(ctx, "shipwright")
if err != nil {
return err.Addf("unable to reset config")
}
// then reset the action manager
return nil
}
|
[
"\"NO_PULL\"",
"\"HUB_ADDRESS\""
] |
[] |
[
"NO_PULL",
"HUB_ADDRESS"
] |
[]
|
["NO_PULL", "HUB_ADDRESS"]
|
go
| 2 | 0 | |
benchmark/postgres/util.py
|
#!/usr/bin/env python3
import os
import time
import yaml
import json
import numpy as np
from resource import getrusage as resource_usage, RUSAGE_SELF
from time import time as timestamp
path_join = os.path.join
_dir = os.path.dirname(os.path.realpath(__file__))
workload_dir = path_join(_dir, "..", "workload")
dataset_info = path_join(workload_dir, "dataset.yaml")
trace_dir = path_join(workload_dir, "trace")
import sqlalchemy
import psycopg2
DB = os.environ.get("DB", "zed")
def db_conn(conn_str=f"postgresql://{DB}:{DB}@localhost/{DB}",
use_sqlalchemy=False):
if use_sqlalchemy:
db = sqlalchemy.create_engine(conn_str)
return db.connect()
else:
return psycopg2.connect(conn_str)
def workload_config(name, query=None):
with open(path_join(workload_dir, name + ".yaml")) as f:
wc = yaml.load(f, Loader=yaml.Loader)
if query is not None:
qc = wc["query"][query]
if "from" in qc:
qc = {**wc["query"][qc["from"]], **qc}
return qc
return wc
# Note: copied from ../util.py
def unix_time(function, *args, **kwargs):
'''Return `real`, `sys` and `user` elapsed time, like UNIX's command `time`
You can calculate the amount of used CPU-time used by your
function/callable by summing `user` and `sys`. `real` is just like the wall
clock.
Note that `sys` and `user`'s resolutions are limited by the resolution of
the operating system's software clock (check `man 7 time` for more
details).
'''
start_time, start_resources = timestamp(), resource_usage(RUSAGE_SELF)
r = function(*args, **kwargs)
end_resources, end_time = resource_usage(RUSAGE_SELF), timestamp()
return {'return': r,
'real': end_time - start_time,
'sys': end_resources.ru_stime - start_resources.ru_stime,
'user': end_resources.ru_utime - start_resources.ru_utime}
def benchmark(fn, init_fn=None, *init_args, num_iter=10, **init_kwargs):
'''Benchmarks fn a specified number of times (num_iter). Calls init_fn
before each time it calls fn, to allow for custom per-iteration
initialization.
'''
_real, _sys, _user = list(), list(), list()
_return = None
for _ in range(num_iter):
if init_fn:
(args, kwargs) = init_fn(*init_args, **init_kwargs)
else:
args = init_args
kwargs = init_kwargs
t = unix_time(fn, *args, **kwargs)
_real.append(t["real"])
_sys.append(t["sys"])
_user.append(t["user"])
# take the last run only
_return = t["return"]
return {
"return": _return,
"real": round(np.mean(_real), 5),
"user": round(np.mean(_user), 5),
"sys": round(np.mean(_sys), 5),
}
def timed(fn):
def timeit(*args, **kw):
ts = time.time()
result = fn(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', fn.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print('%r %2.2f s' % (fn.__name__, (te - ts) * 1))
return result
return timeit
def write_csv(rows: list, name: str):
with open(name, "w") as f:
header = ",".join(map(str, rows[0].keys()))
f.write(header + "\n")
for r in rows:
f.write(",".join(map(str, r.values())))
f.write("\n")
def read_trace(name):
tr = list()
with open(path_join(trace_dir, name)) as f:
if name.endswith("ndjson"):
for line in f:
tr.append(json.loads(line))
else:
            raise NotImplementedError
return tr
def main():
read_trace("network_log_search_30.ndjson")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"DB"
] |
[]
|
["DB"]
|
python
| 1 | 0 | |
tests/cmd/stability/main.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/tests"
"github.com/pingcap/tidb-operator/tests/pkg/apimachinery"
"github.com/pingcap/tidb-operator/tests/pkg/client"
"github.com/pingcap/tidb-operator/tests/pkg/fixture"
"github.com/pingcap/tidb-operator/tests/pkg/metrics"
"github.com/pingcap/tidb-operator/tests/slack"
"github.com/robfig/cron"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/logs"
"k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/utils/pointer"
)
var cfg *tests.Config
var certCtx *apimachinery.CertContext
var upgradeVersions []string
func init() {
client.RegisterFlags()
}
func main() {
logs.InitLogs()
defer logs.FlushLogs()
go func() {
if err := http.ListenAndServe(":6060", nil); err != nil {
log.Fail(err.Error())
}
}()
metrics.StartServer()
cfg = tests.ParseConfigOrDie()
upgradeVersions = cfg.GetUpgradeTidbVersionsOrDie()
ns := os.Getenv("NAMESPACE")
var err error
certCtx, err = apimachinery.SetupServerCert(ns, tests.WebhookServiceName)
if err != nil {
panic(err)
}
go tests.StartValidatingAdmissionWebhookServerOrDie(certCtx)
c := cron.New()
if err := c.AddFunc("0 0 10 * * *", func() {
slack.NotifyAndCompletedf("Succeed %d times in the past 24 hours.", slack.SuccessCount)
slack.SuccessCount = 0
}); err != nil {
panic(err)
}
go c.Start()
wait.Forever(run, 5*time.Minute)
}
func run() {
cli, kubeCli, asCli, aggrCli, apiExtCli := client.NewCliOrDie()
ocfg := newOperatorConfig()
cluster1 := newTidbClusterConfig("ns1", "cluster1")
cluster2 := newTidbClusterConfig("ns2", "cluster2")
cluster3 := newTidbClusterConfig("ns2", "cluster3")
directRestoreCluster1 := newTidbClusterConfig("ns1", "restore1")
fileRestoreCluster1 := newTidbClusterConfig("ns1", "file-restore1")
directRestoreCluster2 := newTidbClusterConfig("ns2", "restore2")
fileRestoreCluster2 := newTidbClusterConfig("ns2", "file-restore2")
onePDCluster1 := newTidbClusterConfig("ns1", "one-pd-cluster-1")
onePDCluster2 := newTidbClusterConfig("ns2", "one-pd-cluster-2")
onePDCluster1.Clustrer.Spec.PD.Replicas = 1
onePDCluster2.Clustrer.Spec.PD.Replicas = 1
allClusters := []*tests.TidbClusterConfig{
cluster1,
cluster2,
cluster3,
directRestoreCluster1,
fileRestoreCluster1,
directRestoreCluster2,
fileRestoreCluster2,
onePDCluster1,
onePDCluster2,
}
deployedClusters := make([]*tests.TidbClusterConfig, 0)
addDeployedClusterFn := func(cluster *tests.TidbClusterConfig) {
for _, tc := range deployedClusters {
if tc.Namespace == cluster.Namespace && tc.ClusterName == cluster.ClusterName {
return
}
}
deployedClusters = append(deployedClusters, cluster)
}
fta := tests.NewFaultTriggerAction(cli, kubeCli, cfg)
fta.CheckAndRecoverEnvOrDie()
oa := tests.NewOperatorActions(cli, kubeCli, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, cfg, allClusters, nil, nil)
oa.CheckK8sAvailableOrDie(nil, nil)
oa.LabelNodesOrDie()
go oa.RunEventWorker()
oa.CleanOperatorOrDie(ocfg)
oa.DeployOperatorOrDie(ocfg)
crdUtil := tests.NewCrdTestUtil(cli, kubeCli, asCli, kubeCli.AppsV1())
log.Logf(fmt.Sprintf("allclusters: %v", allClusters))
crdUtil.CleanResourcesOrDie("tc", "ns1")
crdUtil.CleanResourcesOrDie("tc", "ns2")
crdUtil.CleanResourcesOrDie("pvc", "ns1")
crdUtil.CleanResourcesOrDie("pvc", "ns2")
crdUtil.CleanResourcesOrDie("secret", "ns1")
crdUtil.CleanResourcesOrDie("secret", "ns2")
crdUtil.CleanResourcesOrDie("pod", "ns1")
crdUtil.CleanResourcesOrDie("pod", "ns2")
caseFn := func(clusters []*tests.TidbClusterConfig, onePDClsuter *tests.TidbClusterConfig, backupTargets []tests.BackupTarget, upgradeVersion string) {
// check env
fta.CheckAndRecoverEnvOrDie()
oa.CheckK8sAvailableOrDie(nil, nil)
//deploy and clean the one-pd-cluster
onePDTC := onePDClsuter.Clustrer
crdUtil.CreateTidbClusterOrDie(onePDTC)
crdUtil.WaitTidbClusterReadyOrDie(onePDTC, 60*time.Minute)
crdUtil.DeleteTidbClusterOrDie(onePDTC)
// deploy
for _, cluster := range clusters {
tc := cluster.Clustrer
crdUtil.CreateTidbClusterOrDie(tc)
secret := buildSecret(cluster)
crdUtil.CreateSecretOrDie(secret)
addDeployedClusterFn(cluster)
}
for _, cluster := range clusters {
tc := cluster.Clustrer
crdUtil.WaitTidbClusterReadyOrDie(tc, 60*time.Minute)
crdUtil.CheckDisasterToleranceOrDie(tc)
oa.BeginInsertDataToOrDie(cluster)
}
log.Logf("clusters deployed and checked")
slack.NotifyAndCompletedf("clusters deployed and checked, ready to run stability test")
// upgrade
namespace := os.Getenv("NAMESPACE")
oa.RegisterWebHookAndServiceOrDie(ocfg.WebhookConfigName, namespace, ocfg.WebhookServiceName, certCtx)
for _, cluster := range clusters {
cluster.Clustrer.Spec.Version = upgradeVersion
crdUtil.UpdateTidbClusterOrDie(cluster.Clustrer)
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 60*time.Minute)
}
log.Logf("clusters upgraded in checked")
// configuration change
for _, cluster := range clusters {
cluster.Clustrer.Spec.PD.Replicas = int32(cfg.PDMaxReplicas)
cluster.Clustrer.Spec.TiKV.Config.Set("server.grpc-concurrency", cfg.TiKVGrpcConcurrency)
cluster.Clustrer.Spec.TiDB.Config.Set("token-limit", cfg.TiDBTokenLimit)
crdUtil.UpdateTidbClusterOrDie(cluster.Clustrer)
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 60*time.Minute)
}
oa.CleanWebHookAndServiceOrDie(ocfg.WebhookConfigName)
log.Logf("clusters configurations updated in checked")
for _, cluster := range clusters {
crdUtil.CheckDisasterToleranceOrDie(cluster.Clustrer)
}
log.Logf("clusters DisasterTolerance checked")
//stop node
physicalNode, node, faultTime := fta.StopNodeOrDie()
oa.EmitEvent(nil, fmt.Sprintf("StopNode: %s on %s", node, physicalNode))
oa.CheckFailoverPendingOrDie(deployedClusters, node, &faultTime)
oa.CheckFailoverOrDie(deployedClusters, node)
time.Sleep(3 * time.Minute)
fta.StartNodeOrDie(physicalNode, node)
oa.EmitEvent(nil, fmt.Sprintf("StartNode: %s on %s", node, physicalNode))
oa.WaitPodOnNodeReadyOrDie(deployedClusters, node)
oa.CheckRecoverOrDie(deployedClusters)
for _, cluster := range deployedClusters {
crdUtil.WaitTidbClusterReadyOrDie(cluster.Clustrer, 30*time.Minute)
}
log.Logf("clusters node stopped and restarted checked")
slack.NotifyAndCompletedf("stability test: clusters node stopped and restarted checked")
// truncate tikv sst file
oa.TruncateSSTFileThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
log.Logf("clusters truncate sst file and checked failover")
slack.NotifyAndCompletedf("stability test: clusters truncate sst file and checked failover")
// delete pd data
oa.DeletePDDataThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
log.Logf("cluster[%s/%s] DeletePDDataThenCheckFailoverOrDie success", clusters[0].Namespace, clusters[0].ClusterName)
slack.NotifyAndCompletedf("stability test: DeletePDDataThenCheckFailoverOrDie success")
// stop one etcd
faultEtcd := tests.SelectNode(cfg.ETCDs)
fta.StopETCDOrDie(faultEtcd)
defer fta.StartETCDOrDie(faultEtcd)
time.Sleep(3 * time.Minute)
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, faultEtcd)
fta.StartETCDOrDie(faultEtcd)
log.Logf("clusters stop on etcd and restart")
// stop all etcds
fta.StopETCDOrDie()
time.Sleep(10 * time.Minute)
fta.StartETCDOrDie()
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, "")
log.Logf("clusters stop all etcd and restart")
// stop all kubelets
fta.StopKubeletOrDie()
time.Sleep(10 * time.Minute)
fta.StartKubeletOrDie()
oa.CheckKubeletDownOrDie(ocfg, deployedClusters, "")
log.Logf("clusters stop all kubelets and restart")
// stop all kube-proxy and k8s/operator/tidbcluster is available
fta.StopKubeProxyOrDie()
oa.CheckKubeProxyDownOrDie(ocfg, clusters)
fta.StartKubeProxyOrDie()
log.Logf("clusters stop all kube-proxy and restart")
// stop all kube-scheduler pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeSchedulerOrDie(vNode.IP)
}
}
oa.CheckKubeSchedulerDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeSchedulerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-scheduler and restart")
// stop all kube-controller-manager pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeControllerManagerOrDie(vNode.IP)
}
}
oa.CheckKubeControllerManagerDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeControllerManagerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-controller and restart")
// stop one kube-apiserver pod
faultApiServer := tests.SelectNode(cfg.APIServers)
log.Logf("fault ApiServer Node name = %s", faultApiServer)
fta.StopKubeAPIServerOrDie(faultApiServer)
defer fta.StartKubeAPIServerOrDie(faultApiServer)
time.Sleep(3 * time.Minute)
oa.CheckOneApiserverDownOrDie(ocfg, clusters, faultApiServer)
fta.StartKubeAPIServerOrDie(faultApiServer)
log.Logf("clusters stop one kube-apiserver and restart")
time.Sleep(time.Minute)
// stop all kube-apiserver pods
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StopKubeAPIServerOrDie(vNode.IP)
}
}
oa.CheckAllApiserverDownOrDie(ocfg, clusters)
for _, physicalNode := range cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
fta.StartKubeAPIServerOrDie(vNode.IP)
}
}
log.Logf("clusters stop all kube-apiserver and restart")
time.Sleep(time.Minute)
}
// before operator upgrade
preUpgrade := []*tests.TidbClusterConfig{
cluster1,
cluster2,
}
backupTargets := []tests.BackupTarget{
{
TargetCluster: directRestoreCluster1,
IsAdditional: false,
IncrementalType: tests.DbTypeTiDB,
},
}
if ocfg.Tag != "v1.0.0" {
backupTargets = append(backupTargets, tests.BackupTarget{
TargetCluster: fileRestoreCluster1,
IsAdditional: true,
IncrementalType: tests.DbTypeFile,
})
}
caseFn(preUpgrade, onePDCluster1, backupTargets, upgradeVersions[0])
// after operator upgrade
if cfg.UpgradeOperatorImage != "" && cfg.UpgradeOperatorTag != "" {
ocfg.Image = cfg.UpgradeOperatorImage
ocfg.Tag = cfg.UpgradeOperatorTag
oa.UpgradeOperatorOrDie(ocfg)
postUpgrade := []*tests.TidbClusterConfig{
cluster3,
cluster1,
cluster2,
}
v := upgradeVersions[0]
if len(upgradeVersions) == 2 {
v = upgradeVersions[1]
}
postUpgradeBackupTargets := []tests.BackupTarget{
{
TargetCluster: directRestoreCluster2,
IsAdditional: false,
IncrementalType: tests.DbTypeTiDB,
},
}
if ocfg.Tag != "v1.0.0" {
postUpgradeBackupTargets = append(postUpgradeBackupTargets, tests.BackupTarget{
TargetCluster: fileRestoreCluster2,
IsAdditional: true,
IncrementalType: tests.DbTypeFile,
})
}
// caseFn(postUpgrade, restoreCluster2, tidbUpgradeVersion)
caseFn(postUpgrade, onePDCluster2, postUpgradeBackupTargets, v)
}
for _, cluster := range allClusters {
oa.StopInsertDataTo(cluster)
}
slack.SuccessCount++
slack.NotifyAndCompletedf("Succeed stability onetime")
log.Logf("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339))
}
func newOperatorConfig() *tests.OperatorConfig {
return &tests.OperatorConfig{
Namespace: "pingcap",
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
ControllerManagerReplicas: tests.IntPtr(2),
SchedulerImage: "gcr.io/google-containers/hyperkube",
SchedulerReplicas: tests.IntPtr(2),
Features: []string{
"StableScheduling=true",
},
LogLevel: "2",
WebhookServiceName: tests.WebhookServiceName,
WebhookSecretName: "webhook-secret",
WebhookConfigName: "webhook-config",
ImagePullPolicy: v1.PullAlways,
TestMode: true,
WebhookEnabled: true,
PodWebhookEnabled: false,
StsWebhookEnabled: true,
}
}
func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig {
tidbVersion := cfg.GetTiDBVersionOrDie()
topologyKey := "rack"
tc := fixture.GetTidbCluster(ns, clusterName, tidbVersion)
tc.Spec.PD.StorageClassName = pointer.StringPtr("local-storage")
tc.Spec.TiKV.StorageClassName = pointer.StringPtr("local-storage")
tc.Spec.ConfigUpdateStrategy = v1alpha1.ConfigUpdateStrategyRollingUpdate
return &tests.TidbClusterConfig{
Namespace: ns,
ClusterName: clusterName,
OperatorTag: cfg.OperatorTag,
PDImage: fmt.Sprintf("pingcap/pd:%s", tidbVersion),
TiKVImage: fmt.Sprintf("pingcap/tikv:%s", tidbVersion),
TiDBImage: fmt.Sprintf("pingcap/tidb:%s", tidbVersion),
PumpImage: fmt.Sprintf("pingcap/tidb-binlog:%s", tidbVersion),
StorageClassName: "local-storage",
UserName: "root",
Password: "",
InitSecretName: fmt.Sprintf("%s-set-secret", clusterName),
BackupSecretName: fmt.Sprintf("%s-backup-secret", clusterName),
BackupName: "backup",
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
"pd.resources.requests.cpu": "200m",
"pd.resources.requests.memory": "1Gi",
"tikv.resources.limits.cpu": "8000m",
"tikv.resources.limits.memory": "16Gi",
"tikv.resources.requests.cpu": "1000m",
"tikv.resources.requests.memory": "2Gi",
"tidb.resources.limits.cpu": "8000m",
"tidb.resources.limits.memory": "8Gi",
"tidb.resources.requests.cpu": "500m",
"tidb.resources.requests.memory": "1Gi",
"monitor.persistent": "true",
"discovery.image": cfg.OperatorImage,
"tikv.defaultcfBlockCacheSize": "8GB",
"tikv.writecfBlockCacheSize": "2GB",
"pvReclaimPolicy": "Delete",
},
Args: map[string]string{
"binlog.drainer.workerCount": "1024",
"binlog.drainer.txnBatch": "512",
},
Monitor: true,
BlockWriteConfig: cfg.BlockWriter,
TopologyKey: topologyKey,
ClusterVersion: tidbVersion,
EnableConfigMapRollout: true,
Clustrer: tc,
}
}
func buildSecret(info *tests.TidbClusterConfig) *corev1.Secret {
backupSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: info.BackupSecretName,
Namespace: info.Namespace,
},
Data: map[string][]byte{
"user": []byte(info.UserName),
"password": []byte(info.Password),
},
Type: corev1.SecretTypeOpaque,
}
return &backupSecret
}
|
[
"\"NAMESPACE\"",
"\"NAMESPACE\""
] |
[] |
[
"NAMESPACE"
] |
[]
|
["NAMESPACE"]
|
go
| 1 | 0 | |
v1/ao/internal/reporter/reporter_test.go
|
// Copyright (C) 2017 Librato, Inc. All rights reserved.
package reporter
import (
"context"
"io"
"net"
"os"
"reflect"
"testing"
"time"
"strings"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/bson"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/config"
g "github.com/appoptics/appoptics-apm-go/v1/ao/internal/graphtest"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/host"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/log"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/metrics"
pb "github.com/appoptics/appoptics-apm-go/v1/ao/internal/reporter/collector"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/reporter/mocks"
"github.com/appoptics/appoptics-apm-go/v1/ao/internal/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
mbson "gopkg.in/mgo.v2/bson"
)
const TestServiceKey = "ae38315f6116585d64d82ec2455aa3ec61e02fee25d286f74ace9e4fea189217:go"
// this runs before init()
var _ = func() (_ struct{}) {
periodicTasksDisabled = true
os.Clearenv()
os.Setenv("APPOPTICS_SERVICE_KEY", TestServiceKey)
os.Setenv("APPOPTICS_DEBUG_LEVEL", "debug")
config.Load()
return
}()
// ========================= Test Reporter =============================
func TestReportEvent(t *testing.T) {
r := SetTestReporter()
ctx := newTestContext(t)
assert.Error(t, r.reportEvent(ctx, nil))
assert.Len(t, r.EventBufs, 0) // no reporting
// mismatched task IDs
ev, err := ctx.newEvent(LabelExit, testLayer)
assert.NoError(t, err)
assert.Error(t, r.reportEvent(nil, ev))
assert.Len(t, r.EventBufs, 0) // no reporting
ctx2 := newTestContext(t)
e2, err := ctx2.newEvent(LabelEntry, "layer2")
assert.NoError(t, err)
assert.Error(t, r.reportEvent(ctx2, ev))
assert.Error(t, r.reportEvent(ctx, e2))
// successful event
assert.NoError(t, r.reportEvent(ctx, ev))
r.Close(1)
assert.Len(t, r.EventBufs, 1)
// re-report: shouldn't work (op IDs the same, reporter closed)
assert.Error(t, r.reportEvent(ctx, ev))
g.AssertGraph(t, r.EventBufs, 1, g.AssertNodeMap{
{"go_test", "exit"}: {},
})
}
func TestReportMetric(t *testing.T) {
r := SetTestReporter()
spanMsg := &metrics.HTTPSpanMessage{
BaseSpanMessage: metrics.BaseSpanMessage{
Duration: time.Second,
HasError: false,
},
Transaction: "tname",
Path: "/path/to/url",
Status: 203,
Method: "HEAD",
}
err := ReportSpan(spanMsg)
assert.NoError(t, err)
r.Close(1)
assert.Len(t, r.SpanMessages, 1)
sp, ok := r.SpanMessages[0].(*metrics.HTTPSpanMessage)
require.True(t, ok)
require.NotNil(t, sp)
assert.True(t, reflect.DeepEqual(spanMsg, sp))
}
// test behavior of the TestReporter
func TestTestReporter(t *testing.T) {
r := SetTestReporter()
r.Close(0) // wait on event that will never be reported: causes timeout
assert.Len(t, r.EventBufs, 0)
r = SetTestReporter()
go func() { // simulate late event
time.Sleep(100 * time.Millisecond)
ctx := newTestContext(t)
ev, err := ctx.newEvent(LabelExit, testLayer)
assert.NoError(t, err)
assert.NoError(t, r.reportEvent(ctx, ev))
}()
r.Close(1) // wait on late event -- blocks until timeout or event received
assert.Len(t, r.EventBufs, 1)
// send an event after calling Close -- should panic
assert.Panics(t, func() {
ctx := newTestContext(t)
ev, err := ctx.newEvent(LabelExit, testLayer)
assert.NoError(t, err)
assert.NoError(t, r.reportEvent(ctx, ev))
})
}
// ========================= NULL Reporter =============================
func TestNullReporter(t *testing.T) {
nullR := &nullReporter{}
assert.NoError(t, nullR.reportEvent(nil, nil))
assert.NoError(t, nullR.reportStatus(nil, nil))
assert.NoError(t, nullR.reportSpan(nil))
}
// ========================= UDP Reporter =============================
func startTestUDPListener(t *testing.T, bufs *[][]byte, numbufs int) chan struct{} {
done := make(chan struct{})
assert.IsType(t, &udpReporter{}, globalReporter)
addr, err := net.ResolveUDPAddr("udp4", os.Getenv("APPOPTICS_COLLECTOR_UDP"))
assert.NoError(t, err)
conn, err := net.ListenUDP("udp4", addr)
assert.NoError(t, err)
go func(numBufs int) {
defer conn.Close()
for i := 0; i < numBufs; i++ {
buf := make([]byte, 128*1024)
n, _, err := conn.ReadFromUDP(buf)
t.Logf("Got UDP buf len %v err %v", n, err)
if err != nil {
t.Logf("UDP listener got err, quitting %v", err)
break
}
*bufs = append(*bufs, buf[0:n])
}
close(done)
t.Logf("Closing UDP listener, got %d bufs", numBufs)
}(numbufs)
return done
}
func assertUDPMode(t *testing.T) {
// for UDP mode run test like this:
// APPOPTICS_REPORTER=udp go test -v
if os.Getenv("APPOPTICS_REPORTER") != "udp" {
t.Skip("not running in UDP mode, skipping.")
}
}
func TestUDPReporter(t *testing.T) {
assertUDPMode(t)
assert.IsType(t, &udpReporter{}, globalReporter)
r := globalReporter.(*udpReporter)
ctx := newTestContext(t)
ev1, _ := ctx.newEvent(LabelInfo, testLayer)
ev2, _ := ctx.newEvent(LabelInfo, testLayer)
var bufs [][]byte
startTestUDPListener(t, &bufs, 2)
assert.Error(t, r.reportEvent(nil, nil))
assert.Error(t, r.reportEvent(ctx, nil))
assert.NoError(t, r.reportEvent(ctx, ev1))
assert.Error(t, r.reportStatus(nil, nil))
assert.Error(t, r.reportStatus(ctx, nil))
assert.NoError(t, r.reportStatus(ctx, ev2))
}
// ========================= GRPC Reporter =============================
func assertSSLMode(t *testing.T) {
if os.Getenv("APPOPTICS_REPORTER") == "udp" {
t.Skip("not running in SSL mode, skipping.")
}
}
func TestGRPCReporter(t *testing.T) {
// start test gRPC server
os.Setenv("APPOPTICS_DEBUG_LEVEL", "debug")
config.Load()
addr := "localhost:4567"
server := StartTestGRPCServer(t, addr)
time.Sleep(100 * time.Millisecond)
// set gRPC reporter
os.Setenv("APPOPTICS_COLLECTOR", addr)
os.Setenv("APPOPTICS_TRUSTEDPATH", testCertFile)
config.Load()
oldReporter := globalReporter
setGlobalReporter("ssl")
require.IsType(t, &grpcReporter{}, globalReporter)
r := globalReporter.(*grpcReporter)
// Test WaitForReady
// The reporter is not ready when there is no default setting.
ctxTm1, cancel1 := context.WithTimeout(context.Background(), 0)
defer cancel1()
assert.False(t, r.WaitForReady(ctxTm1))
// The reporter becomes ready after it has got the default setting.
ready := make(chan bool, 1)
r.getSettings(ready)
ctxTm2, cancel2 := context.WithTimeout(context.Background(), time.Millisecond)
defer cancel2()
assert.True(t, r.WaitForReady(ctxTm2))
assert.True(t, r.isReady())
ctx := newTestContext(t)
ev1, err := ctx.newEvent(LabelInfo, "layer1")
assert.NoError(t, err)
ev2, err := ctx.newEvent(LabelInfo, "layer2")
assert.NoError(t, err)
assert.Error(t, r.reportEvent(nil, nil))
assert.Error(t, r.reportEvent(ctx, nil))
assert.NoError(t, r.reportEvent(ctx, ev1))
assert.Error(t, r.reportStatus(nil, nil))
assert.Error(t, r.reportStatus(ctx, nil))
// time.Sleep(time.Second)
assert.NoError(t, r.reportStatus(ctx, ev2))
assert.Equal(t, addr, r.conn.address)
assert.Equal(t, TestServiceKey, r.serviceKey.Load())
assert.Equal(t, int32(grpcMetricIntervalDefault), r.collectMetricInterval)
assert.Equal(t, grpcGetSettingsIntervalDefault, r.getSettingsInterval)
assert.Equal(t, grpcSettingsTimeoutCheckIntervalDefault, r.settingsTimeoutCheckInterval)
time.Sleep(time.Second)
// The reporter becomes not ready after the default setting has been deleted
removeSetting("")
r.checkSettingsTimeout(make(chan bool, 1))
assert.False(t, r.isReady())
ctxTm3, cancel3 := context.WithTimeout(context.Background(), 0)
assert.False(t, r.WaitForReady(ctxTm3))
defer cancel3()
// stop test reporter
server.Stop()
globalReporter = oldReporter
// assert data received
require.Len(t, server.events, 1)
assert.Equal(t, server.events[0].Encoding, pb.EncodingType_BSON)
require.Len(t, server.events[0].Messages, 1)
require.Len(t, server.status, 1)
assert.Equal(t, server.status[0].Encoding, pb.EncodingType_BSON)
require.Len(t, server.status[0].Messages, 1)
dec1, dec2 := mbson.M{}, mbson.M{}
err = mbson.Unmarshal(server.events[0].Messages[0], &dec1)
require.NoError(t, err)
err = mbson.Unmarshal(server.status[0].Messages[0], &dec2)
require.NoError(t, err)
assert.Equal(t, dec1["Layer"], "layer1")
assert.Equal(t, dec1["Hostname"], host.Hostname())
assert.Equal(t, dec1["Label"], LabelInfo)
assert.Equal(t, dec1["PID"], host.PID())
assert.Equal(t, dec2["Layer"], "layer2")
}
func TestShutdownGRPCReporter(t *testing.T) {
// start test gRPC server
os.Setenv("APPOPTICS_DEBUG_LEVEL", "debug")
addr := "localhost:4567"
server := StartTestGRPCServer(t, addr)
time.Sleep(100 * time.Millisecond)
// set gRPC reporter
os.Setenv("APPOPTICS_COLLECTOR", addr)
os.Setenv("APPOPTICS_TRUSTEDPATH", testCertFile)
config.Load()
oldReporter := globalReporter
// numGo := runtime.NumGoroutine()
setGlobalReporter("ssl")
require.IsType(t, &grpcReporter{}, globalReporter)
r := globalReporter.(*grpcReporter)
r.ShutdownNow()
assert.Equal(t, true, r.Closed())
// // Print current goroutines stack
// buf := make([]byte, 1<<16)
// runtime.Stack(buf, true)
// fmt.Printf("%s", buf)
e := r.ShutdownNow()
assert.NotEqual(t, nil, e)
// stop test reporter
server.Stop()
globalReporter = oldReporter
// fmt.Println(buf)
}
func TestInvalidKey(t *testing.T) {
var buf utils.SafeBuffer
var writers []io.Writer
writers = append(writers, &buf)
writers = append(writers, os.Stderr)
log.SetOutput(io.MultiWriter(writers...))
defer func() {
log.SetOutput(os.Stderr)
}()
invalidKey := "invalidf6116585d64d82ec2455aa3ec61e02fee25d286f74ace9e4fea189217:Go"
os.Setenv("APPOPTICS_DEBUG_LEVEL", "debug")
oldKey := os.Getenv("APPOPTICS_SERVICE_KEY")
os.Setenv("APPOPTICS_SERVICE_KEY", invalidKey)
addr := "localhost:4567"
os.Setenv("APPOPTICS_COLLECTOR", addr)
os.Setenv("APPOPTICS_TRUSTEDPATH", testCertFile)
// start test gRPC server
server := StartTestGRPCServer(t, addr)
time.Sleep(100 * time.Millisecond)
// set gRPC reporter
config.Load()
oldReporter := globalReporter
log.SetLevel(log.INFO)
setGlobalReporter("ssl")
require.IsType(t, &grpcReporter{}, globalReporter)
r := globalReporter.(*grpcReporter)
ctx := newTestContext(t)
ev1, _ := ctx.newEvent(LabelInfo, "hello-from-invalid-key")
assert.NoError(t, r.reportEvent(ctx, ev1))
time.Sleep(time.Second)
// The agent reporter should be closed due to received INVALID_API_KEY from the collector
assert.Equal(t, true, r.Closed())
e := r.ShutdownNow()
assert.NotEqual(t, nil, e)
// Tear down everything.
server.Stop()
globalReporter = oldReporter
os.Setenv("APPOPTICS_SERVICE_KEY", oldKey)
patterns := []string{
"rsp=INVALID_API_KEY",
"Shutting down the reporter",
// "periodicTasks goroutine exiting",
"eventSender goroutine exiting",
"spanMessageAggregator goroutine exiting",
"statusSender goroutine exiting",
"eventBatchSender goroutine exiting",
}
for _, ptn := range patterns {
assert.True(t, strings.Contains(buf.String(), ptn), buf.String()+"^^^^^^"+ptn)
}
log.SetLevel(log.WARNING)
}
func TestDefaultBackoff(t *testing.T) {
var backoff []int64
expected := []int64{
500, 750, 1125, 1687, 2531, 3796, 5695, 8542, 12814, 19221, 28832,
43248, 60000, 60000, 60000, 60000, 60000, 60000, 60000, 60000}
bf := func(d time.Duration) { backoff = append(backoff, d.Nanoseconds()/1e6) }
for i := 1; i <= grpcMaxRetries+1; i++ {
DefaultBackoff(i, bf)
}
assert.Equal(t, expected, backoff)
assert.NotNil(t, DefaultBackoff(grpcMaxRetries+1, func(d time.Duration) {}))
}
type NoopDialer struct{}
func (d *NoopDialer) Dial(p DialParams) (*grpc.ClientConn, error) {
return nil, nil
}
func TestInvokeRPC(t *testing.T) {
var buf utils.SafeBuffer
var writers []io.Writer
writers = append(writers, &buf)
writers = append(writers, os.Stderr)
log.SetOutput(io.MultiWriter(writers...))
defer func() {
log.SetOutput(os.Stderr)
}()
c := &grpcConnection{
name: "events channel",
client: nil,
connection: nil,
address: "test-addr",
certificate: []byte(grpcCertDefault),
queueStats: &metrics.EventQueueStats{},
backoff: func(retries int, wait func(d time.Duration)) error {
if retries > grpcMaxRetries {
return errGiveUpAfterRetries
}
return nil
},
Dialer: &NoopDialer{},
flushed: make(chan struct{}),
maxReqBytes: 6 * 1024 * 1024,
}
_ = c.connect()
// Test reporter exiting
mockMethod := &mocks.Method{}
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("")
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(nil)
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("Arg").Return("testArg")
exit := make(chan struct{})
close(exit)
c.setFlushed()
assert.Equal(t, errReporterExiting, c.InvokeRPC(exit, mockMethod))
// Test invalid service key
exit = make(chan struct{})
mockMethod = &mocks.Method{}
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(nil)
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("serviceKey")
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("Arg").Return("testArg")
mockMethod.On("ResultCode", mock.Anything, mock.Anything).
Return(pb.ResultCode_INVALID_API_KEY, nil)
mockMethod.On("RetryOnErr", mock.Anything).Return(false)
assert.Equal(t, errInvalidServiceKey, c.InvokeRPC(exit, mockMethod))
// Test no retry
mockMethod = &mocks.Method{}
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(nil)
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("serviceKey")
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("Arg").Return("testArg")
mockMethod.On("ResultCode", mock.Anything, mock.Anything).
Return(pb.ResultCode_LIMIT_EXCEEDED, nil)
mockMethod.On("RetryOnErr", mock.Anything).Return(false)
assert.Equal(t, errNoRetryOnErr, c.InvokeRPC(exit, mockMethod))
// Test invocation error / recovery logs
failsNum := grpcRetryLogThreshold + (grpcMaxRetries-grpcRetryLogThreshold)/2
mockMethod = &mocks.Method{}
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("serviceKey")
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("Arg").Return("testArg")
mockMethod.On("RetryOnErr", mock.Anything).Return(true)
mockMethod.On("ResultCode", mock.Anything, mock.Anything).
Return(pb.ResultCode_OK, nil)
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(func(ctx context.Context, c pb.TraceCollectorClient) error {
failsNum--
if failsNum <= 0 {
return nil
} else {
return status.Error(codes.Canceled, "Canceled")
}
})
assert.Equal(t, nil, c.InvokeRPC(exit, mockMethod))
assert.True(t, strings.Contains(buf.String(), "invocation error"))
assert.True(t, strings.Contains(buf.String(), "error recovered"))
// Test redirect
redirectNum := 1
mockMethod = &mocks.Method{}
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("serviceKey")
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("RetryOnErr", mock.Anything).Return(true)
mockMethod.On("Arg", mock.Anything, mock.Anything).
Return("new-addr:9999")
mockMethod.On("ResultCode", mock.Anything, mock.Anything).
Return(func() pb.ResultCode {
redirectNum--
if redirectNum < 0 {
return pb.ResultCode_OK
} else {
return pb.ResultCode_REDIRECT
}
}, nil)
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(nil)
assert.Equal(t, nil, c.InvokeRPC(exit, mockMethod))
assert.True(t, c.isActive())
assert.Equal(t, "new-addr:9999", c.address)
// Test request too big
mockMethod = &mocks.Method{}
mockMethod.On("Call", mock.Anything, mock.Anything).
Return(nil)
mockMethod.On("String").Return("mock")
mockMethod.On("ServiceKey").Return("serviceKey")
mockMethod.On("Message").Return(nil)
mockMethod.On("MessageLen").Return(int64(0))
mockMethod.On("RequestSize").Return(int64(6*1024*1024 + 1))
mockMethod.On("CallSummary").Return("summary")
mockMethod.On("Arg").Return("testArg")
mockMethod.On("RetryOnErr", mock.Anything).Return(false)
assert.Contains(t, c.InvokeRPC(exit, mockMethod).Error(), errNoRetryOnErr.Error())
}
func TestInitReporter(t *testing.T) {
// Test disable agent
os.Setenv("APPOPTICS_DISABLED", "true")
config.Load()
initReporter()
require.IsType(t, &nullReporter{}, globalReporter)
// Test enable agent
os.Unsetenv("APPOPTICS_DISABLED")
os.Setenv("APPOPTICS_REPORTER", "ssl")
config.Load()
assert.False(t, config.GetDisabled())
initReporter()
require.IsType(t, &grpcReporter{}, globalReporter)
}
func TestCollectMetricsNextInterval(t *testing.T) {
r := &grpcReporter{collectMetricInterval: 10}
next := r.collectMetricsNextInterval()
// very weak check
assert.True(t, next <= time.Second*10, next)
}
func TestCustomMetrics(t *testing.T) {
r := &grpcReporter{
// Other fields are not needed.
customMetrics: metrics.NewMeasurements(true, grpcMetricIntervalDefault, 500),
}
// Test non-positive count
assert.NotNil(t, r.CustomSummaryMetric("Summary", 1.1, metrics.MetricOptions{
Count: 0,
HostTag: true,
Tags: map[string]string{"hello": "world"},
}))
assert.NotNil(t, r.CustomIncrementMetric("Incremental", metrics.MetricOptions{
Count: -1,
HostTag: true,
Tags: map[string]string{"hi": "globe"},
}))
r.CustomSummaryMetric("Summary", 1.1, metrics.MetricOptions{
Count: 1,
HostTag: true,
Tags: map[string]string{"hello": "world"},
})
r.CustomIncrementMetric("Incremental", metrics.MetricOptions{
Count: 1,
HostTag: true,
Tags: map[string]string{"hi": "globe"},
})
custom := metrics.BuildMessage(r.customMetrics.CopyAndReset(grpcMetricIntervalDefault), false)
bbuf := bson.WithBuf(custom)
mMap := mbson.M{}
mbson.Unmarshal(bbuf.GetBuf(), mMap)
assert.Equal(t, mMap["IsCustom"], true)
assert.NotEqual(t, mMap["Timestamp_u"], 0)
assert.Equal(t, mMap["MetricsFlushInterval"], grpcMetricIntervalDefault)
assert.NotEqual(t, mMap["IPAddresses"], nil)
assert.NotEqual(t, mMap["Distro"], "")
mts := mMap["measurements"].([]interface{})
require.Equal(t, len(mts), 2)
mSummary := mts[0].(mbson.M)
mIncremental := mts[1].(mbson.M)
if mSummary["name"] == "Incremental" {
mSummary, mIncremental = mIncremental, mSummary
}
assert.Equal(t, mSummary["name"], "Summary")
assert.Equal(t, mSummary["count"], 1)
assert.Equal(t, mSummary["sum"], 1.1)
assert.EqualValues(t, mbson.M{"hello": "world"}, mSummary["tags"])
assert.Equal(t, mIncremental["name"], "Incremental")
assert.Equal(t, mIncremental["count"], 1)
assert.EqualValues(t, mbson.M{"hi": "globe"}, mIncremental["tags"])
}
// testProxy performs tests of http/https proxy.
func testProxy(t *testing.T, proxyUrl string) {
addr := "localhost:4567"
os.Setenv("APPOPTICS_DEBUG_LEVEL", "debug")
os.Setenv("APPOPTICS_COLLECTOR", addr)
os.Setenv("APPOPTICS_TRUSTEDPATH", testCertFile)
// set proxy
os.Setenv("APPOPTICS_PROXY", proxyUrl)
os.Setenv("APPOPTICS_PROXY_CERT_PATH", testCertFile)
proxy, err := NewTestProxyServer(proxyUrl, testCertFile, testKeyFile)
require.Nil(t, err)
require.Nil(t, proxy.Start())
defer proxy.Stop()
config.Load()
server := StartTestGRPCServer(t, addr)
time.Sleep(100 * time.Millisecond)
oldReporter := globalReporter
defer func() { globalReporter = oldReporter }()
setGlobalReporter("ssl")
require.IsType(t, &grpcReporter{}, globalReporter)
r := globalReporter.(*grpcReporter)
// Test WaitForReady
// The reporter is not ready when there is no default setting.
ctxTm1, cancel1 := context.WithTimeout(context.Background(), 0)
defer cancel1()
assert.False(t, r.WaitForReady(ctxTm1))
// The reporter becomes ready after it has got the default setting.
ready := make(chan bool, 1)
r.getSettings(ready)
ctxTm2, cancel2 := context.WithTimeout(context.Background(), time.Millisecond)
defer cancel2()
assert.True(t, r.WaitForReady(ctxTm2))
assert.True(t, r.isReady())
ctx := newTestContext(t)
ev1, err := ctx.newEvent(LabelInfo, "layer1")
assert.NoError(t, err)
ev2, err := ctx.newEvent(LabelInfo, "layer2")
assert.NoError(t, err)
assert.Error(t, r.reportEvent(nil, nil))
assert.Error(t, r.reportEvent(ctx, nil))
assert.NoError(t, r.reportEvent(ctx, ev1))
assert.Error(t, r.reportStatus(nil, nil))
assert.Error(t, r.reportStatus(ctx, nil))
// time.Sleep(time.Second)
assert.NoError(t, r.reportStatus(ctx, ev2))
assert.Equal(t, addr, r.conn.address)
assert.Equal(t, TestServiceKey, r.serviceKey.Load())
assert.Equal(t, int32(grpcMetricIntervalDefault), r.collectMetricInterval)
assert.Equal(t, grpcGetSettingsIntervalDefault, r.getSettingsInterval)
assert.Equal(t, grpcSettingsTimeoutCheckIntervalDefault, r.settingsTimeoutCheckInterval)
time.Sleep(time.Second)
// The reporter becomes not ready after the default setting has been deleted
removeSetting("")
r.checkSettingsTimeout(make(chan bool, 1))
assert.False(t, r.isReady())
ctxTm3, cancel3 := context.WithTimeout(context.Background(), 0)
assert.False(t, r.WaitForReady(ctxTm3))
defer cancel3()
// stop test reporter
server.Stop()
// assert data received
require.Len(t, server.events, 1)
assert.Equal(t, server.events[0].Encoding, pb.EncodingType_BSON)
require.Len(t, server.events[0].Messages, 1)
require.Len(t, server.status, 1)
assert.Equal(t, server.status[0].Encoding, pb.EncodingType_BSON)
require.Len(t, server.status[0].Messages, 1)
dec1, dec2 := mbson.M{}, mbson.M{}
err = mbson.Unmarshal(server.events[0].Messages[0], &dec1)
require.NoError(t, err)
err = mbson.Unmarshal(server.status[0].Messages[0], &dec2)
require.NoError(t, err)
assert.Equal(t, dec1["Layer"], "layer1")
assert.Equal(t, dec1["Hostname"], host.Hostname())
assert.Equal(t, dec1["Label"], LabelInfo)
assert.Equal(t, dec1["PID"], host.PID())
assert.Equal(t, dec2["Layer"], "layer2")
}
func TestHttpProxy(t *testing.T) {
testProxy(t, "http://usr:pwd@localhost:12345")
}
func TestHttpsProxy(t *testing.T) {
testProxy(t, "https://usr:pwd@localhost:12345")
}
func TestFlush(t *testing.T) {
globalReporter = newGRPCReporter()
r := globalReporter.(*grpcReporter)
assert.NoError(t, r.Flush())
}
|
[
"\"APPOPTICS_COLLECTOR_UDP\"",
"\"APPOPTICS_REPORTER\"",
"\"APPOPTICS_REPORTER\"",
"\"APPOPTICS_SERVICE_KEY\""
] |
[] |
[
"APPOPTICS_REPORTER",
"APPOPTICS_SERVICE_KEY",
"APPOPTICS_COLLECTOR_UDP"
] |
[]
|
["APPOPTICS_REPORTER", "APPOPTICS_SERVICE_KEY", "APPOPTICS_COLLECTOR_UDP"]
|
go
| 3 | 0 | |
simple_test.py
|
import os
import bottle
import bottle_session
try:
import urlparse
except:
from urllib import parse as urlparse
import random
import redis
import string
import logging
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
app = bottle.app()
session_plugin = bottle_session.SessionPlugin(cookie_lifetime=bottle_session.MAX_TTL)
#redis_url = os.environ.get('REDIS_URL','http://:foobared@localhost:6379')
#parsed_url = urlparse.urlparse(redis_url)
#connection_pool = redis.ConnectionPool(host=parsed_url.hostname, port=parsed_url.port, password=parsed_url.password)
#session_plugin.connection_pool = connection_pool
app.install(session_plugin)
@bottle.route('/')
def get_main_page(session):
csrf = ''.join(random.choice(string.ascii_uppercase+string.ascii_lowercase+string.digits) for x in range(32))
session['csrf'] = csrf
logger.debug("Items")
for k,v in session.items():
logger.debug("%s:%s",k,v)
keys = session.keys()
logger.debug("Keys: %s", ', '.join(keys))
values = session.values()
logger.debug("Values: %s", ', '.join(values))
if session.get('name') is None:
context = {'csrf_token': csrf}
return bottle.template('set_name', **context)
else:
context = {'csrf_token': csrf,
'name': session.get('name')
}
return bottle.template('has_name', **context)
@bottle.route('/submit', method='POST')
def set_name(session):
keys = bottle.request.forms.keys()
session['name'] = bottle.request.forms.name.strip()
csrf = bottle.request.forms.get('csrf_token')
if session['csrf']!=csrf:
return bottle.template('error', warning_message='Cross-site scripting error.')
bottle.redirect('/')
@bottle.route('/logout')
def logout(session):
session.destroy()
bottle.redirect('/')
if __name__=='__main__':
bottle.debug(True)
port = 8080
bottle.run(app=app,host='127.0.0.1',port=port)
|
[] |
[] |
[
"REDIS_URL"
] |
[]
|
["REDIS_URL"]
|
python
| 1 | 0 | |
vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go
|
package plugin // import "github.com/docker/docker/internal/test/fixtures/plugin"
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/plugin"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
)
// CreateOpt is used to change the default plugin config before
// creating it
type CreateOpt func(*Config)
// Config wraps types.PluginConfig to provide some extra state for options
// extra customizations on the plugin details, such as using a custom binary to
// create the plugin with.
type Config struct {
*types.PluginConfig
binPath string
}
// WithBinary is a CreateOpt to set a custom binary to create the plugin with.
// This binary must be statically compiled.
func WithBinary(bin string) CreateOpt {
return func(cfg *Config) {
cfg.binPath = bin
}
}
// CreateClient is the interface used for `BuildPlugin` to interact with the
// daemon.
type CreateClient interface {
PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error
}
// Create creates a new plugin with the specified name
func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error {
tmpDir, err := ioutil.TempDir("", "create-test-plugin")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
tar, err := makePluginBundle(tmpDir, opts...)
if err != nil {
return err
}
defer tar.Close()
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name})
}
// CreateInRegistry makes a plugin (locally) and pushes it to a registry.
// This does not use a dockerd instance to create or push the plugin.
// If you just want to create a plugin in some daemon, use `Create`.
//
// This can be useful when testing plugins on swarm where you don't really want
// the plugin to exist on any of the daemons (immediately) and there needs to be
// some way to distribute the plugin.
func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error {
tmpDir, err := ioutil.TempDir("", "create-test-plugin-local")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
inPath := filepath.Join(tmpDir, "plugin")
if err := os.MkdirAll(inPath, 0755); err != nil {
return errors.Wrap(err, "error creating plugin root")
}
tar, err := makePluginBundle(inPath, opts...)
if err != nil {
return err
}
defer tar.Close()
dummyExec := func(m *plugin.Manager) (plugin.Executor, error) {
return nil, nil
}
regService, err := registry.NewService(registry.ServiceOptions{V2Only: true})
if err != nil {
return err
}
managerConfig := plugin.ManagerConfig{
Store: plugin.NewStore(),
RegistryService: regService,
Root: filepath.Join(tmpDir, "root"),
ExecRoot: "/run/docker", // manager init fails if not set
CreateExecutor: dummyExec,
LogPluginEvent: func(id, name, action string) {}, // panics when not set
}
manager, err := plugin.NewManager(managerConfig)
if err != nil {
return errors.Wrap(err, "error creating plugin manager")
}
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil {
return err
}
if auth == nil {
auth = &types.AuthConfig{}
}
err = manager.Push(ctx, repo, nil, auth, ioutil.Discard)
return errors.Wrap(err, "error pushing plugin")
}
func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) {
p := &types.PluginConfig{
Interface: types.PluginConfigInterface{
Socket: "basic.sock",
Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}},
},
Entrypoint: []string{"/basic"},
}
cfg := &Config{
PluginConfig: p,
}
for _, o := range opts {
o(cfg)
}
if cfg.binPath == "" {
binPath, err := ensureBasicPluginBin()
if err != nil {
return nil, err
}
cfg.binPath = binPath
}
configJSON, err := json.Marshal(p)
if err != nil {
return nil, err
}
if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil {
return nil, errors.Wrap(err, "error creating plugin rootfs dir")
}
// Ensure the mount target paths exist
for _, m := range p.Mounts {
var stat os.FileInfo
if m.Source != nil {
stat, err = os.Stat(*m.Source)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
}
if stat == nil || stat.IsDir() {
var mode os.FileMode = 0755
if stat != nil {
mode = stat.Mode()
}
if err := os.MkdirAll(filepath.Join(inPath, "rootfs", m.Destination), mode); err != nil {
return nil, errors.Wrap(err, "error preparing plugin mount destination path")
}
} else {
if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(m.Destination)), 0755); err != nil {
return nil, errors.Wrap(err, "error preparing plugin mount destination dir")
}
f, err := os.Create(filepath.Join(inPath, "rootfs", m.Destination))
if err != nil && !os.IsExist(err) {
return nil, errors.Wrap(err, "error preparing plugin mount destination file")
}
if f != nil {
f.Close()
}
}
}
if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil {
return nil, errors.Wrap(err, "error copying plugin binary to rootfs path")
}
tar, err := archive.Tar(inPath, archive.Uncompressed)
return tar, errors.Wrap(err, "error making plugin archive")
}
func ensureBasicPluginBin() (string, error) {
name := "docker-basic-plugin"
p, err := exec.LookPath(name)
if err == nil {
return p, nil
}
goBin, err := exec.LookPath("go")
if err != nil {
return "", err
}
installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
sourcePath := filepath.Join("github.com", "docker", "docker", "internal", "test", "fixtures", "plugin", "basic")
cmd := exec.Command(goBin, "build", "-o", installPath, sourcePath)
cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
if out, err := cmd.CombinedOutput(); err != nil {
return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
}
return installPath, nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
molecule/default/tests/test_default.py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_phpcpd_is_installed(host):
f = host.file('/usr/local/bin/phpcpd')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
|
[] |
[] |
[
"MOLECULE_INVENTORY_FILE"
] |
[]
|
["MOLECULE_INVENTORY_FILE"]
|
python
| 1 | 0 | |
aodhclient/shell.py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import logging
import os
import sys
import warnings
from cliff import app
from cliff import commandmanager
from keystoneauth1 import exceptions
from keystoneauth1 import loading
from aodhclient import __version__
from aodhclient import client
from aodhclient import noauth
from aodhclient.v2 import alarm_cli
from aodhclient.v2 import alarm_history_cli
from aodhclient.v2 import capabilities_cli
class AodhCommandManager(commandmanager.CommandManager):
SHELL_COMMANDS = {
"alarm create": alarm_cli.CliAlarmCreate,
"alarm delete": alarm_cli.CliAlarmDelete,
"alarm list": alarm_cli.CliAlarmList,
"alarm show": alarm_cli.CliAlarmShow,
"alarm update": alarm_cli.CliAlarmUpdate,
"alarm state get": alarm_cli.CliAlarmStateGet,
"alarm state set": alarm_cli.CliAlarmStateSet,
"alarm-history show": alarm_history_cli.CliAlarmHistoryShow,
"alarm-history search": alarm_history_cli.CliAlarmHistorySearch,
"capabilities list": capabilities_cli.CliCapabilitiesList,
}
def load_commands(self, namespace):
for name, command_class in self.SHELL_COMMANDS.items():
self.add_command(name, command_class)
class AodhShell(app.App):
def __init__(self):
super(AodhShell, self).__init__(
description='Aodh command line client',
version=__version__,
command_manager=AodhCommandManager('aodhclient'),
deferred_help=True,
)
self._client = None
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = super(AodhShell, self).build_option_parser(
description, version, argparse_kwargs={'allow_abbrev': False})
# Global arguments, one day this should go to keystoneauth1
parser.add_argument(
'--os-region-name',
metavar='<auth-region-name>',
dest='region_name',
default=os.environ.get('OS_REGION_NAME'),
help='Authentication region name (Env: OS_REGION_NAME)')
parser.add_argument(
'--os-interface',
metavar='<interface>',
dest='interface',
choices=['admin', 'public', 'internal'],
default=os.environ.get('OS_INTERFACE'),
help='Select an interface type.'
' Valid interface types: [admin, public, internal].'
' (Env: OS_INTERFACE)')
parser.add_argument(
'--aodh-api-version',
default=os.environ.get('AODH_API_VERSION', '2'),
help='Defaults to env[AODH_API_VERSION] or 2.')
loading.register_session_argparse_arguments(parser=parser)
plugin = loading.register_auth_argparse_arguments(
parser=parser, argv=sys.argv, default="password")
if not isinstance(plugin, noauth.AodhNoAuthLoader):
parser.add_argument(
'--aodh-endpoint',
metavar='<endpoint>',
dest='endpoint',
default=os.environ.get('AODH_ENDPOINT'),
help='Aodh endpoint (Env: AODH_ENDPOINT)')
return parser
@property
def client(self):
# NOTE(sileht): we lazy load the client to not
# load/connect auth stuffs
if self._client is None:
if hasattr(self.options, "endpoint"):
endpoint_override = self.options.endpoint
else:
endpoint_override = None
auth_plugin = loading.load_auth_from_argparse_arguments(
self.options)
session = loading.load_session_from_argparse_arguments(
self.options, auth=auth_plugin)
self._client = client.Client(self.options.aodh_api_version,
session=session,
interface=self.options.interface,
region_name=self.options.region_name,
endpoint_override=endpoint_override)
return self._client
def clean_up(self, cmd, result, err):
if isinstance(err, exceptions.HttpError) and err.details:
print(err.details, file=sys.stderr)
def configure_logging(self):
if self.options.debug:
# --debug forces verbose_level 3
# Set this here so cliff.app.configure_logging() can work
self.options.verbose_level = 3
super(AodhShell, self).configure_logging()
root_logger = logging.getLogger('')
# Set logging to the requested level
if self.options.verbose_level == 0:
# --quiet
root_logger.setLevel(logging.ERROR)
warnings.simplefilter("ignore")
elif self.options.verbose_level == 1:
# This is the default case, no --debug, --verbose or --quiet
root_logger.setLevel(logging.WARNING)
warnings.simplefilter("ignore")
elif self.options.verbose_level == 2:
# One --verbose
root_logger.setLevel(logging.INFO)
warnings.simplefilter("once")
elif self.options.verbose_level >= 3:
# Two or more --verbose
root_logger.setLevel(logging.DEBUG)
# Hide some useless message
requests_log = logging.getLogger("requests")
cliff_log = logging.getLogger('cliff')
stevedore_log = logging.getLogger('stevedore')
iso8601_log = logging.getLogger("iso8601")
cliff_log.setLevel(logging.ERROR)
stevedore_log.setLevel(logging.ERROR)
iso8601_log.setLevel(logging.ERROR)
if self.options.debug:
requests_log.setLevel(logging.DEBUG)
else:
requests_log.setLevel(logging.ERROR)
def main(args=None):
if args is None:
args = sys.argv[1:]
return AodhShell().run(args)
|
[] |
[] |
[
"OS_REGION_NAME",
"AODH_ENDPOINT",
"AODH_API_VERSION",
"OS_INTERFACE"
] |
[]
|
["OS_REGION_NAME", "AODH_ENDPOINT", "AODH_API_VERSION", "OS_INTERFACE"]
|
python
| 4 | 0 | |
clipboard_unix.go
|
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd linux netbsd openbsd solaris dragonfly
package clipboard
import (
"errors"
"os"
"os/exec"
)
const (
xsel = "xsel"
xclip = "xclip"
powershellExe = "powershell.exe"
clipExe = "clip.exe"
wlcopy = "wl-copy"
wlpaste = "wl-paste"
termuxClipboardGet = "termux-clipboard-get"
termuxClipboardSet = "termux-clipboard-set"
)
var (
Primary bool
trimDos bool
pasteCmdArgs []string
copyCmdArgs []string
xselPasteArgs = []string{xsel, "--output", "--clipboard"}
xselCopyArgs = []string{xsel, "--input", "--clipboard"}
xclipPasteArgs = []string{xclip, "-out", "-selection", "clipboard"}
xclipCopyArgs = []string{xclip, "-in", "-selection", "clipboard"}
powershellExePasteArgs = []string{powershellExe, "Get-Clipboard"}
clipExeCopyArgs = []string{clipExe}
wlpasteArgs = []string{wlpaste, "--no-newline"}
wlcopyArgs = []string{wlcopy}
termuxPasteArgs = []string{termuxClipboardGet}
termuxCopyArgs = []string{termuxClipboardSet}
missingCommands = errors.New("No clipboard utilities available. Please install xsel, xclip, wl-clipboard or Termux:API add-on for termux-clipboard-get/set.")
)
func init() {
if os.Getenv("WAYLAND_DISPLAY") != "" {
pasteCmdArgs = wlpasteArgs
copyCmdArgs = wlcopyArgs
if _, err := exec.LookPath(wlcopy); err == nil {
if _, err := exec.LookPath(wlpaste); err == nil {
return
}
}
}
if os.Getenv("WSL_DISTRO_NAME") != "" {
pasteCmdArgs = powershellExePasteArgs
copyCmdArgs = clipExeCopyArgs
trimDos = true
if _, err := exec.LookPath(clipExe); err == nil {
if _, err := exec.LookPath(powershellExe); err == nil {
return
}
}
}
pasteCmdArgs = xclipPasteArgs
copyCmdArgs = xclipCopyArgs
if _, err := exec.LookPath(xclip); err == nil {
return
}
pasteCmdArgs = xselPasteArgs
copyCmdArgs = xselCopyArgs
if _, err := exec.LookPath(xsel); err == nil {
return
}
pasteCmdArgs = termuxPasteArgs
copyCmdArgs = termuxCopyArgs
if _, err := exec.LookPath(termuxClipboardSet); err == nil {
if _, err := exec.LookPath(termuxClipboardGet); err == nil {
return
}
}
Unsupported = true
}
func getPasteCommand() *exec.Cmd {
if Primary {
pasteCmdArgs = pasteCmdArgs[:1]
}
return exec.Command(pasteCmdArgs[0], pasteCmdArgs[1:]...)
}
func getCopyCommand() *exec.Cmd {
if Primary {
copyCmdArgs = copyCmdArgs[:1]
}
return exec.Command(copyCmdArgs[0], copyCmdArgs[1:]...)
}
func readAll() (string, error) {
if Unsupported {
return "", missingCommands
}
pasteCmd := getPasteCommand()
out, err := pasteCmd.Output()
if err != nil {
return "", err
}
result := string(out)
if trimDos && len(result) > 1 {
result = result[:len(result)-2]
}
return result, nil
}
func writeAll(text string) error {
if Unsupported {
return missingCommands
}
copyCmd := getCopyCommand()
in, err := copyCmd.StdinPipe()
if err != nil {
return err
}
if err := copyCmd.Start(); err != nil {
return err
}
if _, err := in.Write([]byte(text)); err != nil {
return err
}
if err := in.Close(); err != nil {
return err
}
return copyCmd.Wait()
}
|
[
"\"WAYLAND_DISPLAY\"",
"\"WSL_DISTRO_NAME\""
] |
[] |
[
"WSL_DISTRO_NAME",
"WAYLAND_DISPLAY"
] |
[]
|
["WSL_DISTRO_NAME", "WAYLAND_DISPLAY"]
|
go
| 2 | 0 |