test/e2e/app_management_test.go
package e2e
import (
"context"
"fmt"
"math/rand"
"os"
"path"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
. "github.com/argoproj/gitops-engine/pkg/sync/common"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"github.com/argoproj/argo-cd/v2/common"
applicationpkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
repositorypkg "github.com/argoproj/argo-cd/v2/pkg/apiclient/repository"
. "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
. "github.com/argoproj/argo-cd/v2/test/e2e/fixture"
. "github.com/argoproj/argo-cd/v2/test/e2e/fixture/app"
projectFixture "github.com/argoproj/argo-cd/v2/test/e2e/fixture/project"
repoFixture "github.com/argoproj/argo-cd/v2/test/e2e/fixture/repos"
"github.com/argoproj/argo-cd/v2/test/e2e/testdata"
. "github.com/argoproj/argo-cd/v2/util/argo"
. "github.com/argoproj/argo-cd/v2/util/errors"
"github.com/argoproj/argo-cd/v2/util/io"
"github.com/argoproj/argo-cd/v2/util/settings"
)
const (
guestbookPath = "guestbook"
guestbookPathLocal = "./testdata/guestbook_local"
globalWithNoNameSpace = "global-with-no-namespace"
guestbookWithNamespace = "guestbook-with-namespace"
)
func TestSyncToUnsignedCommit(t *testing.T) {
SkipOnEnv(t, "GPG")
Given(t).
Project("gpg").
Path(guestbookPath).
When().
IgnoreErrors().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationError)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(HealthIs(health.HealthStatusMissing))
}
func TestSyncToSignedCommitWithoutKnownKey(t *testing.T) {
SkipOnEnv(t, "GPG")
Given(t).
Project("gpg").
Path(guestbookPath).
When().
AddSignedFile("test.yaml", "null").
IgnoreErrors().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationError)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(HealthIs(health.HealthStatusMissing))
}
func TestSyncToSignedCommitKeyWithKnownKey(t *testing.T) {
SkipOnEnv(t, "GPG")
Given(t).
Project("gpg").
Path(guestbookPath).
GPGPublicKeyAdded().
Sleep(2).
When().
AddSignedFile("test.yaml", "null").
IgnoreErrors().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy))
}
func TestAppCreation(t *testing.T) {
ctx := Given(t)
ctx.
Path(guestbookPath).
When().
CreateApp().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.Equal(t, Name(), app.Name)
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.Source.RepoURL)
assert.Equal(t, guestbookPath, app.Spec.Source.Path)
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
}).
Expect(Event(EventReasonResourceCreated, "create")).
And(func(_ *Application) {
// app should be listed
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.Contains(t, output, Name())
}).
When().
// ensure that create is idempotent
CreateApp().
Then().
Given().
Revision("master").
When().
// ensure that update replaces the spec and merges labels and annotations
And(func() {
FailOnErr(AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Patch(context.Background(),
ctx.GetName(), types.MergePatchType, []byte(`{"metadata": {"labels": { "test": "label" }, "annotations": { "test": "annotation" }}}`), metav1.PatchOptions{}))
}).
CreateApp("--upsert").
Then().
And(func(app *Application) {
assert.Equal(t, "label", app.Labels["test"])
assert.Equal(t, "annotation", app.Annotations["test"])
assert.Equal(t, "master", app.Spec.Source.TargetRevision)
})
}
func TestAppCreationWithoutForceUpdate(t *testing.T) {
ctx := Given(t)
ctx.
Path(guestbookPath).
DestName("in-cluster").
When().
CreateApp().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.Equal(t, Name(), app.Name)
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.Source.RepoURL)
assert.Equal(t, guestbookPath, app.Spec.Source.Path)
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, "in-cluster", app.Spec.Destination.Name)
}).
Expect(Event(EventReasonResourceCreated, "create")).
And(func(_ *Application) {
// app should be listed
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.Contains(t, output, Name())
}).
When().
IgnoreErrors().
CreateApp().
Then().
Expect(Error("", "existing application spec is different, use upsert flag to force update"))
}
func TestDeleteAppResource(t *testing.T) {
ctx := Given(t)
ctx.
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(_ *Application) {
// delete one of the app's resources directly via the CLI
_, err := RunCli("app", "delete-resource", Name(), "--kind", "Service", "--resource-name", "guestbook-ui")
assert.NoError(t, err)
}).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(HealthIs(health.HealthStatusMissing))
}
// demonstrate that we cannot use a standard sync when an immutable field is changed; we must use "force"
func TestImmutableChange(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")
text := FailOnErr(Run(".", "kubectl", "get", "service", "-n", "kube-system", "kube-dns", "-o", "jsonpath={.spec.clusterIP}")).(string)
parts := strings.Split(text, ".")
n := rand.Intn(254)
ip1 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n)
ip2 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n+1)
Given(t).
Path("service").
When().
CreateApp().
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip1)).
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
When().
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip2)).
IgnoreErrors().
Sync().
DoNotIgnoreErrors().
Then().
Expect(OperationPhaseIs(OperationFailed)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(ResourceResultNumbering(1)).
Expect(ResourceResultMatches(ResourceResult{
Kind: "Service",
Version: "v1",
Namespace: DeploymentNamespace(),
Name: "my-service",
SyncPhase: "Sync",
Status: "SyncFailed",
HookPhase: "Failed",
Message: `Service "my-service" is invalid`,
})).
// now we can do this with a force
Given().
Force().
When().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy))
}
func TestInvalidAppProject(t *testing.T) {
Given(t).
Path(guestbookPath).
Project("does-not-exist").
When().
IgnoreErrors().
CreateApp().
Then().
Expect(Error("", "application references project does-not-exist which does not exist"))
}
func TestAppDeletion(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
When().
Delete(true).
Then().
Expect(DoesNotExist()).
Expect(Event(EventReasonResourceDeleted, "delete"))
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.NotContains(t, output, Name())
}
func TestAppLabels(t *testing.T) {
Given(t).
Path("config-map").
When().
CreateApp("-l", "foo=bar").
Then().
And(func(app *Application) {
assert.Contains(t, FailOnErr(RunCli("app", "list")), Name())
assert.Contains(t, FailOnErr(RunCli("app", "list", "-l", "foo=bar")), Name())
assert.NotContains(t, FailOnErr(RunCli("app", "list", "-l", "foo=rubbish")), Name())
}).
Given().
// clear the name so that only the label selector applies; a non-matching selector should sync nothing
Name("").
When().
IgnoreErrors().
Sync("-l", "foo=rubbish").
DoNotIgnoreErrors().
Then().
Expect(Error("", "no apps match selector foo=rubbish")).
// check that the app can still be synced via a matching label selector
Given().
When().
Sync("-l", "foo=bar")
}
func TestTrackAppStateAndSyncApp(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", DeploymentNamespace()))).
Expect(Event(EventReasonResourceUpdated, "sync")).
And(func(app *Application) {
assert.NotNil(t, app.Status.OperationState.SyncResult)
})
}
func TestAppRollbackSuccessful(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.NotEmpty(t, app.Status.Sync.Revision)
}).
And(func(app *Application) {
appWithHistory := app.DeepCopy()
appWithHistory.Status.History = []RevisionHistory{{
ID: 1,
Revision: app.Status.Sync.Revision,
DeployedAt: metav1.Time{Time: metav1.Now().UTC().Add(-1 * time.Minute)},
Source: app.Spec.Source,
}, {
ID: 2,
Revision: "cdb",
DeployedAt: metav1.Time{Time: metav1.Now().UTC().Add(-2 * time.Minute)},
Source: app.Spec.Source,
}}
patch, _, err := diff.CreateTwoWayMergePatch(app, appWithHistory, &Application{})
assert.NoError(t, err)
app, err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
assert.NoError(t, err)
// sync app and make sure it reaches InSync state
_, err = RunCli("app", "rollback", app.Name, "1")
assert.NoError(t, err)
}).
Expect(Event(EventReasonOperationStarted, "rollback")).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, SyncStatusCodeSynced, app.Status.Sync.Status)
assert.NotNil(t, app.Status.OperationState.SyncResult)
assert.Equal(t, 2, len(app.Status.OperationState.SyncResult.Resources))
assert.Equal(t, OperationSucceeded, app.Status.OperationState.Phase)
assert.Equal(t, 3, len(app.Status.History))
})
}
func TestComparisonFailsIfClusterNotAdded(t *testing.T) {
Given(t).
Path(guestbookPath).
DestServer("https://not-registered-cluster/api").
When().
IgnoreErrors().
CreateApp().
Then().
Expect(DoesNotExist())
}
func TestCannotSetInvalidPath(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
IgnoreErrors().
AppSet("--path", "garbage").
Then().
Expect(Error("", "app path does not exist"))
}
func TestManipulateApplicationResources(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
manifests, err := RunCli("app", "manifests", app.Name, "--source", "live")
assert.NoError(t, err)
resources, err := kube.SplitYAML([]byte(manifests))
assert.NoError(t, err)
index := -1
for i := range resources {
if resources[i].GetKind() == kube.DeploymentKind {
index = i
break
}
}
assert.True(t, index > -1)
deployment := resources[index]
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
_, err = client.DeleteResource(context.Background(), &applicationpkg.ApplicationResourceDeleteRequest{
Name: &app.Name,
Group: deployment.GroupVersionKind().Group,
Kind: deployment.GroupVersionKind().Kind,
Version: deployment.GroupVersionKind().Version,
Namespace: deployment.GetNamespace(),
ResourceName: deployment.GetName(),
})
assert.NoError(t, err)
}).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync))
}
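// assertSecretDataHidden recursively verifies that every secret value in the
// given manifest is masked, including any nested manifest stored in the
// kubectl last-applied-configuration annotation.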
func assertSecretDataHidden(t *testing.T, manifest string) {
secret, err := UnmarshalToUnstructured(manifest)
assert.NoError(t, err)
_, hasStringData, err := unstructured.NestedMap(secret.Object, "stringData")
assert.NoError(t, err)
assert.False(t, hasStringData)
secretData, hasData, err := unstructured.NestedMap(secret.Object, "data")
assert.NoError(t, err)
assert.True(t, hasData)
for _, v := range secretData {
assert.Regexp(t, regexp.MustCompile(`[*]*`), v)
}
var lastAppliedConfigAnnotation string
annotations := secret.GetAnnotations()
if annotations != nil {
lastAppliedConfigAnnotation = annotations[v1.LastAppliedConfigAnnotation]
}
if lastAppliedConfigAnnotation != "" {
assertSecretDataHidden(t, lastAppliedConfigAnnotation)
}
}
func TestAppWithSecrets(t *testing.T) {
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
Given(t).
Path("secrets").
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res := FailOnErr(client.GetResource(context.Background(), &applicationpkg.ApplicationResourceRequest{
Namespace: app.Spec.Destination.Namespace,
Kind: kube.SecretKind,
Group: "",
Name: &app.Name,
Version: "v1",
ResourceName: "test-secret",
})).(*applicationpkg.ApplicationResourceResponse)
assertSecretDataHidden(t, res.Manifest)
manifests, err := client.GetManifests(context.Background(), &applicationpkg.ApplicationManifestQuery{Name: &app.Name})
errors.CheckError(err)
for _, manifest := range manifests.Manifests {
assertSecretDataHidden(t, manifest)
}
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
// make sure resource update error does not print secret details
_, err = RunCli("app", "patch-resource", "test-app-with-secrets", "--resource-name", "test-secret",
"--kind", "Secret", "--patch", `{"op": "add", "path": "/data", "value": "hello"}'`,
"--patch-type", "application/json-patch+json")
require.Error(t, err)
assert.Contains(t, err.Error(), fmt.Sprintf("failed to patch Secret %s/test-secret", DeploymentNamespace()))
assert.NotContains(t, err.Error(), "username")
assert.NotContains(t, err.Error(), "password")
// patch secret and make sure app is out of sync and diff detects the change
FailOnErr(KubeClientset.CoreV1().Secrets(DeploymentNamespace()).Patch(context.Background(),
"test-secret", types.JSONPatchType, []byte(`[
{"op": "remove", "path": "/data/username"},
{"op": "add", "path": "/stringData", "value": {"password": "foo"}}
]`), metav1.PatchOptions{}))
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name)
assert.Error(t, err)
assert.Contains(t, diffOutput, "username: ++++++++")
assert.Contains(t, diffOutput, "password: ++++++++++++")
// local diff should ignore secrets
diffOutput = FailOnErr(RunCli("app", "diff", app.Name, "--local", "testdata/secrets")).(string)
assert.Empty(t, diffOutput)
// ignore missing field and make sure diff shows no difference
app.Spec.IgnoreDifferences = []ResourceIgnoreDifferences{{
Kind: kube.SecretKind, JSONPointers: []string{"/data"},
}}
FailOnErr(client.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: app.Spec}))
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
}).
// verify that an uncommitted secret is also ignored during diffing
When().
WriteFile("secret3.yaml", `
apiVersion: v1
kind: Secret
metadata:
name: test-secret3
stringData:
username: test-username`).
Then().
And(func(app *Application) {
diffOutput := FailOnErr(RunCli("app", "diff", app.Name, "--local", "testdata/secrets")).(string)
assert.Empty(t, diffOutput)
})
}
func TestResourceDiffing(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
// Patch deployment
_, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Patch(context.Background(),
"guestbook-ui", types.JSONPatchType, []byte(`[{ "op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "test" }]`), metav1.PatchOptions{})
assert.NoError(t, err)
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.Error(t, err)
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", DeploymentNamespace()))
}).
Given().
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {
IgnoreDifferences: OverrideIgnoreDiff{JSONPointers: []string{"/spec/template/spec/containers/0/image"}},
}}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.NoError(t, err)
assert.Empty(t, diffOutput)
}).
Given().
When().
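// Server-side apply a conflicting revisionHistoryLimit under a dedicated
// field manager so that its changes can later be ignored via
// ManagedFieldsManagers.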
And(func() {
output, err := RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
assert.NoError(t, err)
assert.Contains(t, output, "serverside-applied")
}).
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Given().
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {
IgnoreDifferences: OverrideIgnoreDiff{
ManagedFieldsManagers: []string{"revision-history-manager"},
JSONPointers: []string{"/spec/template/spec/containers/0/image"},
},
}}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Given().
When().
Sync().
PatchApp(`[{
"op": "add",
"path": "/spec/syncPolicy",
"value": { "syncOptions": ["RespectIgnoreDifferences=true"] }
}]`).
And(func() {
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, int32(3), *deployment.Spec.RevisionHistoryLimit)
}).
And(func() {
output, err := RunWithStdin(testdata.SSARevisionHistoryDeployment, "", "kubectl", "apply", "-n", DeploymentNamespace(), "--server-side=true", "--field-manager=revision-history-manager", "--validate=false", "--force-conflicts", "-f", "-")
assert.NoError(t, err)
assert.Contains(t, output, "serverside-applied")
}).
Then().
When().Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
}).
When().Sync().Then().Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, int32(1), *deployment.Spec.RevisionHistoryLimit)
})
}
func TestCRDs(t *testing.T) {
testEdgeCasesApplicationResources(t, "crd-creation", health.HealthStatusHealthy)
}
func TestKnownTypesInCRDDiffing(t *testing.T) {
dummiesGVR := schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "dummies"}
Given(t).
Path("crd-creation").
When().CreateApp().Sync().Then().
Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
And(func() {
dummyResIf := DynamicClientset.Resource(dummiesGVR).Namespace(DeploymentNamespace())
patchData := []byte(`{"spec":{"cpu": "2"}}`)
FailOnErr(dummyResIf.Patch(context.Background(), "dummy-crd-instance", types.MergePatchType, patchData, metav1.PatchOptions{}))
}).Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
When().
And(func() {
SetResourceOverrides(map[string]ResourceOverride{
"argoproj.io/Dummy": {
KnownTypeFields: []KnownTypeField{{
Field: "spec",
Type: "core/v1/ResourceList",
}},
},
})
}).
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestDuplicatedResources(t *testing.T) {
testEdgeCasesApplicationResources(t, "duplicated-resources", health.HealthStatusHealthy)
}
func TestConfigMap(t *testing.T) {
testEdgeCasesApplicationResources(t, "config-map", health.HealthStatusHealthy, "my-map Synced configmap/my-map created")
}
func TestFailedConversion(t *testing.T) {
if os.Getenv("ARGOCD_E2E_K3S") == "true" {
t.SkipNow()
}
defer func() {
FailOnErr(Run("", "kubectl", "delete", "apiservice", "v1beta1.metrics.k8s.io"))
}()
testEdgeCasesApplicationResources(t, "failed-conversion", health.HealthStatusProgressing)
}
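// testEdgeCasesApplicationResources creates and syncs an app from the given
// testdata path, asserts the expected health status and any expected sync
// messages, and verifies that a local diff against the same path is empty.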
func testEdgeCasesApplicationResources(t *testing.T, appPath string, statusCode health.HealthStatusCode, message ...string) {
expect := Given(t).
Path(appPath).
When().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
for i := range message {
expect = expect.Expect(Success(message[i]))
}
expect.
Expect(HealthIs(statusCode)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", path.Join("testdata", appPath))
assert.Empty(t, diffOutput)
assert.NoError(t, err)
})
}
func TestKsonnetApp(t *testing.T) {
SkipOnEnv(t, "KSONNET")
Given(t).
Path("ksonnet").
Env("prod").
// Null out dest server to verify that destination is inferred from ksonnet app
Parameter("guestbook-ui=image=gcr.io/heptio-images/ks-guestbook-demo:0.1").
DestServer("").
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
closer, client, err := ArgoCDClientset.NewRepoClient()
assert.NoError(t, err)
defer io.Close(closer)
details, err := client.GetAppDetails(context.Background(), &repositorypkg.RepoAppDetailsQuery{
Source: &app.Spec.Source,
})
assert.NoError(t, err)
serviceType := ""
for _, param := range details.Ksonnet.Parameters {
if param.Name == "type" && param.Component == "guestbook-ui" {
serviceType = param.Value
}
}
assert.Equal(t, "LoadBalancer", serviceType)
})
}
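// actionsConfig declares a single custom "sample" resource action in Lua
// that adds a sample=test label to the target object.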
const actionsConfig = `discovery.lua: return { sample = {} }
definitions:
- name: sample
action.lua: |
obj.metadata.labels.sample = 'test'
return obj`
func TestResourceAction(t *testing.T) {
Given(t).
Path(guestbookPath).
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {Actions: actionsConfig}}).
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
actions, err := client.ListResourceActions(context.Background(), &applicationpkg.ApplicationResourceRequest{
Name: &app.Name,
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
})
assert.NoError(t, err)
assert.Equal(t, []ResourceAction{{Name: "sample", Disabled: false}}, actions.Actions)
_, err = client.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{Name: &app.Name,
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
Action: "sample",
})
assert.NoError(t, err)
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, "test", deployment.Labels["sample"])
})
}
func TestSyncResourceByLabel(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
_, _ = RunCli("app", "sync", app.Name, "--label", fmt.Sprintf("app.kubernetes.io/instance=%s", app.Name))
}).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
_, err := RunCli("app", "sync", app.Name, "--label", "this-label=does-not-exist")
assert.Error(t, err)
assert.Contains(t, err.Error(), "level=fatal")
})
}
func TestLocalManifestSync(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
}).
Given().
LocalPath(guestbookPathLocal).
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 81")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.3")
}).
Given().
LocalPath("").
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
})
}
func TestLocalSync(t *testing.T) {
Given(t).
// we've got to use Helm as this uses kubeVersion
Path("helm").
When().
CreateApp().
Then().
And(func(app *Application) {
FailOnErr(RunCli("app", "sync", app.Name, "--local", "testdata/helm"))
})
}
func TestNoLocalSyncWithAutosyncEnabled(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
_, err := RunCli("app", "set", app.Name, "--sync-policy", "automated")
assert.NoError(t, err)
_, err = RunCli("app", "sync", app.Name, "--local", guestbookPathLocal)
assert.Error(t, err)
})
}
func TestLocalSyncDryRunWithAutosyncEnabled(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
And(func(app *Application) {
_, err := RunCli("app", "set", app.Name, "--sync-policy", "automated")
assert.NoError(t, err)
appBefore := app.DeepCopy()
_, err = RunCli("app", "sync", app.Name, "--dry-run", "--local", guestbookPathLocal)
assert.NoError(t, err)
appAfter := app.DeepCopy()
assert.True(t, reflect.DeepEqual(appBefore, appAfter))
})
}
func TestSyncAsync(t *testing.T) {
Given(t).
Path(guestbookPath).
Async(true).
When().
CreateApp().
Sync().
Then().
Expect(Success("")).
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestPermissions(t *testing.T) {
EnsureCleanState(t)
appName := Name()
_, err := RunCli("proj", "create", "test")
assert.NoError(t, err)
// make sure app cannot be created without permissions in project
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.Error(t, err)
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'test'", RepoURL(RepoURLTypeFile))
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'test'", KubernetesInternalAPIServerAddr, DeploymentNamespace())
assert.Contains(t, err.Error(), sourceError)
assert.Contains(t, err.Error(), destinationError)
proj, err := AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Get(context.Background(), "test", metav1.GetOptions{})
assert.NoError(t, err)
proj.Spec.Destinations = []ApplicationDestination{{Server: "*", Namespace: "*"}}
proj.Spec.SourceRepos = []string{"*"}
proj, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(context.Background(), proj, metav1.UpdateOptions{})
assert.NoError(t, err)
// make sure the controller reports permission issues in conditions
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.NoError(t, err)
defer func() {
err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Delete(context.Background(), appName, metav1.DeleteOptions{})
assert.NoError(t, err)
}()
proj.Spec.Destinations = []ApplicationDestination{}
proj.Spec.SourceRepos = []string{}
_, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(context.Background(), proj, metav1.UpdateOptions{})
assert.NoError(t, err)
time.Sleep(1 * time.Second)
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
refresh := string(RefreshTypeNormal)
app, err := client.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: &refresh})
assert.NoError(t, err)
destinationErrorExist := false
sourceErrorExist := false
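// after permissions are revoked, the controller should surface both the
// source and destination violations as application conditions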
for i := range app.Status.Conditions {
if strings.Contains(app.Status.Conditions[i].Message, destinationError) {
destinationErrorExist = true
}
if strings.Contains(app.Status.Conditions[i].Message, sourceError) {
sourceErrorExist = true
}
}
assert.True(t, destinationErrorExist)
assert.True(t, sourceErrorExist)
}
func TestPermissionWithScopedRepo(t *testing.T) {
projName := "argo-project"
projectFixture.
Given(t).
Name(projName).
Destination("*,*").
When().
Create()
repoFixture.Given(t, true).
When().
Path(RepoURL(RepoURLTypeFile)).
Project(projName).
Create()
GivenWithSameState(t).
Project(projName).
RepoURLType(RepoURLTypeFile).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Prune=false"}}]`).
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
DeleteFile("pod-1.yaml").
Refresh(RefreshTypeHard).
IgnoreErrors().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(ResourceSyncStatusIs("Pod", "pod-1", SyncStatusCodeOutOfSync))
}
func TestPermissionDeniedWithScopedRepo(t *testing.T) {
projName := "argo-project"
projectFixture.
Given(t).
Name(projName).
Destination("*,*").
When().
Create()
repoFixture.Given(t, true).
When().
Path(RepoURL(RepoURLTypeFile)).
Create()
GivenWithSameState(t).
Project(projName).
RepoURLType(RepoURLTypeFile).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Prune=false"}}]`).
IgnoreErrors().
CreateApp().
Then().
Expect(Error("", "is not permitted in project"))
}
// make sure that if we delete a resource from the app, it is not pruned when annotated with Prune=false
func TestSyncOptionPruneFalse(t *testing.T) {
Given(t).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Prune=false"}}]`).
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
DeleteFile("pod-1.yaml").
Refresh(RefreshTypeHard).
IgnoreErrors().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(ResourceSyncStatusIs("Pod", "pod-1", SyncStatusCodeOutOfSync))
}
// make sure that an invalid manifest fails sync with a client validation error, and that disabling validation pushes it to the server, which then returns a server error instead
func TestSyncOptionValidateFalse(t *testing.T) {
// k3s does not validate at all, so this test does not work
if os.Getenv("ARGOCD_E2E_K3S") == "true" {
t.SkipNow()
}
Given(t).
Path("crd-validation").
When().
CreateApp().
Then().
Expect(Success("")).
When().
IgnoreErrors().
Sync().
Then().
// client error
Expect(Error("error validating data", "")).
When().
PatchFile("deployment.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Validate=false"}}]`).
Sync().
Then().
// server error
Expect(Error("Error from server", ""))
}
// make sure that, if we have a resource that needs pruning, but we're ignoring it, the app is in-sync
func TestCompareOptionIgnoreExtraneous(t *testing.T) {
Given(t).
Prune(false).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/compare-options": "IgnoreExtraneous"}}]`).
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
DeleteFile("pod-1.yaml").
Refresh(RefreshTypeHard).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.Resources, 2)
statusByName := map[string]SyncStatusCode{}
for _, r := range app.Status.Resources {
statusByName[r.Name] = r.Status
}
assert.Equal(t, SyncStatusCodeOutOfSync, statusByName["pod-1"])
assert.Equal(t, SyncStatusCodeSynced, statusByName["pod-2"])
}).
When().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestSelfManagedApps(t *testing.T) {
Given(t).
Path("self-managed-app").
When().
PatchFile("resources.yaml", fmt.Sprintf(`[{"op": "replace", "path": "/spec/source/repoURL", "value": "%s"}]`, RepoURL(RepoURLTypeFile))).
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(a *Application) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
defer cancel()
reconciledCount := 0
var lastReconciledAt *metav1.Time
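// watch the app for a few seconds and count how often ReconciledAt changes;
// a self-managed app must not reconcile itself in a loop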
for event := range ArgoCDClientset.WatchApplicationWithRetry(ctx, a.Name, a.ResourceVersion) {
reconciledAt := event.Application.Status.ReconciledAt
if reconciledAt == nil {
reconciledAt = &metav1.Time{}
}
if lastReconciledAt != nil && !lastReconciledAt.Equal(reconciledAt) {
reconciledCount = reconciledCount + 1
}
lastReconciledAt = reconciledAt
}
assert.True(t, reconciledCount < 3, "Application was reconciled too many times")
})
}
func TestExcludedResource(t *testing.T) {
Given(t).
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {Actions: actionsConfig}}).
Path(guestbookPath).
ResourceFilter(settings.ResourcesFilter{
ResourceExclusions: []settings.FilteredResource{{Kinds: []string{kube.DeploymentKind}}},
}).
When().
CreateApp().
Sync().
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionExcludedResourceWarning, "Resource apps/Deployment guestbook-ui is excluded in the settings"))
}
func TestRevisionHistoryLimit(t *testing.T) {
Given(t).
Path("config-map").
When().
CreateApp().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.History, 1)
}).
When().
AppSet("--revision-history-limit", "1").
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.History, 1)
})
}
func TestOrphanedResource(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")
Given(t).
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true)},
}).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
When().
And(func() {
FailOnErr(KubeClientset.CoreV1().ConfigMaps(DeploymentNamespace()).Create(context.Background(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "orphaned-configmap",
},
}, metav1.CreateOptions{}))
}).
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionOrphanedResourceWarning, "Application has 1 orphaned resources")).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name)
assert.NoError(t, err)
assert.Contains(t, output, "orphaned-configmap")
}).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Group: "Test", Kind: "ConfigMap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionOrphanedResourceWarning, "Application has 1 orphaned resources")).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name)
assert.NoError(t, err)
assert.Contains(t, output, "orphaned-configmap")
}).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Kind: "ConfigMap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name)
assert.NoError(t, err)
assert.NotContains(t, output, "orphaned-configmap")
}).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Kind: "ConfigMap", Name: "orphaned-configmap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name)
assert.NoError(t, err)
assert.NotContains(t, output, "orphaned-configmap")
}).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: nil,
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions())
}
func TestNotPermittedResources(t *testing.T) {
ctx := Given(t)
pathType := networkingv1.PathTypePrefix
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "sample-ingress",
Labels: map[string]string{
common.LabelKeyAppInstance: ctx.GetName(),
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{{
Path: "/",
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "guestbook-ui",
Port: networkingv1.ServiceBackendPort{Number: 80},
},
},
PathType: &pathType,
}},
},
},
}},
},
}
defer func() {
log.Infof("Ingress 'sample-ingress' deleted from %s", ArgoCDNamespace)
CheckError(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Delete(context.Background(), "sample-ingress", metav1.DeleteOptions{}))
}()
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook-ui",
Labels: map[string]string{
common.LabelKeyAppInstance: ctx.GetName(),
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
}},
Selector: map[string]string{
"app": "guestbook-ui",
},
},
}
ctx.ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: DeploymentNamespace(), Server: "*"}},
NamespaceResourceBlacklist: []metav1.GroupKind{
{Group: "", Kind: "Service"},
}}).
And(func() {
FailOnErr(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Create(context.Background(), ingress, metav1.CreateOptions{}))
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Create(context.Background(), svc, metav1.CreateOptions{}))
}).
Path(guestbookPath).
When().
CreateApp().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
statusByKind := make(map[string]ResourceStatus)
for _, res := range app.Status.Resources {
statusByKind[res.Kind] = res
}
_, hasIngress := statusByKind[kube.IngressKind]
assert.False(t, hasIngress, "Ingress is prohibited and should not even be visible to the user as a managed resource")
serviceStatus := statusByKind[kube.ServiceKind]
assert.Equal(t, SyncStatusCodeUnknown, serviceStatus.Status, "Service is a prohibited managed resource, so its status should be Unknown")
deploymentStatus := statusByKind[kube.DeploymentKind]
assert.Equal(t, SyncStatusCodeOutOfSync, deploymentStatus.Status)
}).
When().
Delete(true).
Then().
Expect(DoesNotExist())
// Make sure prohibited resources are not deleted during application deletion
FailOnErr(KubeClientset.NetworkingV1().Ingresses(ArgoCDNamespace).Get(context.Background(), "sample-ingress", metav1.GetOptions{}))
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Get(context.Background(), "guestbook-ui", metav1.GetOptions{}))
}
func TestSyncWithInfos(t *testing.T) {
expectedInfo := make([]*Info, 2)
expectedInfo[0] = &Info{Name: "name1", Value: "val1"}
expectedInfo[1] = &Info{Name: "name2", Value: "val2"}
Given(t).
Path(guestbookPath).
When().
CreateApp().
Then().
And(func(app *Application) {
_, err := RunCli("app", "sync", app.Name,
"--info", fmt.Sprintf("%s=%s", expectedInfo[0].Name, expectedInfo[0].Value),
"--info", fmt.Sprintf("%s=%s", expectedInfo[1].Name, expectedInfo[1].Value))
assert.NoError(t, err)
}).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.ElementsMatch(t, app.Status.OperationState.Operation.Info, expectedInfo)
})
}
// Given: argocd app create does not provide --dest-namespace
//        Manifest contains the cluster-scoped resource console, which does not require a namespace
// Expect: no app.Status.Conditions
func TestCreateAppWithNoNameSpaceForGlobalResource(t *testing.T) {
Given(t).
Path(globalWithNoNameSpace).
When().
CreateWithNoNameSpace().
Then().
And(func(app *Application) {
time.Sleep(500 * time.Millisecond)
app, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(context.Background(), app.Name, metav1.GetOptions{})
assert.NoError(t, err)
assert.Len(t, app.Status.Conditions, 0)
})
}
// Given: argocd app create does not provide --dest-namespace
//        Manifest contains deployment and service resources, which require a namespace
//        Neither the deployment nor the service has a namespace in the manifest
// Expect: app.Status.Conditions for the deployment and service that lack a namespace
func TestCreateAppWithNoNameSpaceWhenRequired(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateWithNoNameSpace().
Refresh(RefreshTypeNormal).
Then().
And(func(app *Application) {
updatedApp, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(context.Background(), app.Name, metav1.GetOptions{})
require.NoError(t, err)
assert.Len(t, updatedApp.Status.Conditions, 2)
assert.Equal(t, ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
assert.Equal(t, ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[1].Type)
})
}
// Given: argocd app create does not provide --dest-namespace
//        Manifest contains deployment and service resources, which require a namespace
//        Some of the deployments and services have a namespace in the manifest
//        Some of the deployments and services do not have a namespace in the manifest
// Expect: app.Status.Conditions for the deployments and services that lack a namespace
func TestCreateAppWithNoNameSpaceWhenRequired2(t *testing.T) {
Given(t).
Path(guestbookWithNamespace).
When().
CreateWithNoNameSpace().
Refresh(RefreshTypeNormal).
Then().
And(func(app *Application) {
updatedApp, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(context.Background(), app.Name, metav1.GetOptions{})
require.NoError(t, err)
assert.Len(t, updatedApp.Status.Conditions, 2)
assert.Equal(t, ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[0].Type)
assert.Equal(t, ApplicationConditionInvalidSpecError, updatedApp.Status.Conditions[1].Type)
})
}
func TestListResource(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")
Given(t).
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true)},
}).
Path(guestbookPath).
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
When().
And(func() {
FailOnErr(KubeClientset.CoreV1().ConfigMaps(DeploymentNamespace()).Create(context.Background(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "orphaned-configmap",
},
}, metav1.CreateOptions{}))
}).
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionOrphanedResourceWarning, "Application has 1 orphaned resources")).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name)
assert.NoError(t, err)
assert.Contains(t, output, "orphaned-configmap")
assert.Contains(t, output, "guestbook-ui")
}).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name, "--orphaned=true")
assert.NoError(t, err)
assert.Contains(t, output, "orphaned-configmap")
assert.NotContains(t, output, "guestbook-ui")
}).
And(func(app *Application) {
output, err := RunCli("app", "resources", app.Name, "--orphaned=false")
assert.NoError(t, err)
assert.NotContains(t, output, "orphaned-configmap")
assert.Contains(t, output, "guestbook-ui")
}).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: nil,
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions())
}
// Given: application is created with --sync-option CreateNamespace=true
//        and the application's --dest-namespace does not exist
// Verify: the --dest-namespace is created
//         the application syncs successfully
//         when the application is deleted, the --dest-namespace is not deleted
func TestNamespaceAutoCreation(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")
updatedNamespace := getNewNamespace(t)
defer func() {
if !t.Skipped() {
_, err := Run("", "kubectl", "delete", "namespace", updatedNamespace)
assert.NoError(t, err)
}
}()
Given(t).
Timeout(30).
Path("guestbook").
When().
CreateApp("--sync-option", "CreateNamespace=true").
Then().
And(func(app *Application) {
// Make sure the namespace we are about to update to does not exist
_, err := Run("", "kubectl", "get", "namespace", updatedNamespace)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not found")
}).
When().
AppSet("--dest-namespace", updatedNamespace).
Sync().
Then().
Expect(Success("")).
Expect(OperationPhaseIs(OperationSucceeded)).Expect(ResourceHealthWithNamespaceIs("Deployment", "guestbook-ui", updatedNamespace, health.HealthStatusHealthy)).
Expect(ResourceHealthWithNamespaceIs("Deployment", "guestbook-ui", updatedNamespace, health.HealthStatusHealthy)).
Expect(ResourceSyncStatusWithNamespaceIs("Deployment", "guestbook-ui", updatedNamespace, SyncStatusCodeSynced)).
Expect(ResourceSyncStatusWithNamespaceIs("Deployment", "guestbook-ui", updatedNamespace, SyncStatusCodeSynced)).
When().
Delete(true).
Then().
Expect(Success("")).
And(func(app *Application) {
// Verify that deleting the app does not delete the auto-created namespace
output, err := Run("", "kubectl", "get", "namespace", updatedNamespace)
assert.NoError(t, err)
assert.Contains(t, output, updatedNamespace)
})
}
func TestFailedSyncWithRetry(t *testing.T) {
Given(t).
Path("hook").
When().
PatchFile("hook.yaml", `[{"op": "replace", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/hook": "PreSync"}}]`).
// make hook fail
PatchFile("hook.yaml", `[{"op": "replace", "path": "/spec/containers/0/command", "value": ["false"]}]`).
CreateApp().
IgnoreErrors().
Sync("--retry-limit=1", "--retry-backoff-duration=1s").
Then().
Expect(OperationPhaseIs(OperationFailed)).
Expect(OperationMessageContains("retried 1 times"))
}
func TestCreateDisableValidation(t *testing.T) {
Given(t).
Path("baddir").
When().
CreateApp("--validate=false").
Then().
And(func(app *Application) {
_, err := RunCli("app", "create", app.Name, "--upsert", "--validate=false", "--repo", RepoURL(RepoURLTypeFile),
"--path", "baddir2", "--project", app.Spec.Project, "--dest-server", KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.NoError(t, err)
}).
When().
AppSet("--path", "baddir3", "--validate=false")
}
func TestCreateFromPartialFile(t *testing.T) {
partialApp :=
`metadata:
labels:
labels.local/from-file: file
labels.local/from-args: file
annotations:
annotations.local/from-file: file
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
syncPolicy:
automated:
prune: true
`
path := "helm-values"
Given(t).
When().
// app should be auto-synced once created
CreateFromPartialFile(partialApp, "--path", path, "-l", "labels.local/from-args=args", "--helm-set", "foo=foo").
Then().
Expect(Success("")).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
And(func(app *Application) {
assert.Equal(t, map[string]string{"labels.local/from-file": "file", "labels.local/from-args": "args"}, app.ObjectMeta.Labels)
assert.Equal(t, map[string]string{"annotations.local/from-file": "file"}, app.ObjectMeta.Annotations)
assert.Equal(t, []string{"resources-finalizer.argocd.argoproj.io"}, app.ObjectMeta.Finalizers)
assert.Equal(t, path, app.Spec.Source.Path)
assert.Equal(t, []HelmParameter{{Name: "foo", Value: "foo"}}, app.Spec.Source.Helm.Parameters)
})
}
// Ensure actions work when using a resource action that modifies status and/or spec
func TestCRDStatusSubresourceAction(t *testing.T) {
actions := `
discovery.lua: |
actions = {}
actions["update-spec"] = {["disabled"] = false}
actions["update-status"] = {["disabled"] = false}
actions["update-both"] = {["disabled"] = false}
return actions
definitions:
- name: update-both
action.lua: |
obj.spec = {}
obj.spec.foo = "update-both"
obj.status = {}
obj.status.bar = "update-both"
return obj
- name: update-spec
action.lua: |
obj.spec = {}
obj.spec.foo = "update-spec"
return obj
- name: update-status
action.lua: |
obj.status = {}
obj.status.bar = "update-status"
return obj
`
Given(t).
Path("crd-subresource").
And(func() {
SetResourceOverrides(map[string]ResourceOverride{
"argoproj.io/StatusSubResource": {
Actions: actions,
},
"argoproj.io/NonStatusSubResource": {
Actions: actions,
},
})
}).
When().CreateApp().Sync().Then().
Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
Refresh(RefreshTypeNormal).
Then().
// tests resource actions on a CRD using status subresource
And(func(app *Application) {
_, err := RunCli("app", "actions", "run", app.Name, "--kind", "StatusSubResource", "update-both")
assert.NoError(t, err)
text := FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.spec.foo}")).(string)
assert.Equal(t, "update-both", text)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.status.bar}")).(string)
assert.Equal(t, "update-both", text)
_, err = RunCli("app", "actions", "run", app.Name, "--kind", "StatusSubResource", "update-spec")
assert.NoError(t, err)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.spec.foo}")).(string)
assert.Equal(t, "update-spec", text)
_, err = RunCli("app", "actions", "run", app.Name, "--kind", "StatusSubResource", "update-status")
assert.NoError(t, err)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "statussubresources", "status-subresource", "-o", "jsonpath={.status.bar}")).(string)
assert.Equal(t, "update-status", text)
}).
// tests resource actions on a CRD *not* using status subresource
And(func(app *Application) {
_, err := RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-both")
assert.NoError(t, err)
text := FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.spec.foo}")).(string)
assert.Equal(t, "update-both", text)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.status.bar}")).(string)
assert.Equal(t, "update-both", text)
_, err = RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-spec")
assert.NoError(t, err)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.spec.foo}")).(string)
assert.Equal(t, "update-spec", text)
_, err = RunCli("app", "actions", "run", app.Name, "--kind", "NonStatusSubResource", "update-status")
assert.NoError(t, err)
text = FailOnErr(Run(".", "kubectl", "-n", app.Spec.Destination.Namespace, "get", "nonstatussubresources", "non-status-subresource", "-o", "jsonpath={.status.bar}")).(string)
assert.Equal(t, "update-status", text)
})
}
func TestAppLogs(t *testing.T) {
SkipOnEnv(t, "OPENSHIFT")
Given(t).
Path("guestbook-logs").
When().
CreateApp().
Sync().
Then().
Expect(HealthIs(health.HealthStatusHealthy)).
And(func(app *Application) {
out, err := RunCli("app", "logs", app.Name, "--kind", "Deployment", "--group", "", "--name", "guestbook-ui")
assert.NoError(t, err)
assert.Contains(t, out, "Hi")
}).
And(func(app *Application) {
out, err := RunCli("app", "logs", app.Name, "--kind", "Pod")
assert.NoError(t, err)
assert.Contains(t, out, "Hi")
}).
And(func(app *Application) {
out, err := RunCli("app", "logs", app.Name, "--kind", "Service")
assert.NoError(t, err)
assert.NotContains(t, out, "Hi")
})
}
func TestAppWaitOperationInProgress(t *testing.T) {
Given(t).
And(func() {
SetResourceOverrides(map[string]ResourceOverride{
"batch/Job": {
HealthLua: `return { status = 'Running' }`,
},
"apps/Deployment": {
HealthLua: `return { status = 'Suspended' }`,
},
})
}).
Async(true).
Path("hook-and-deployment").
When().
CreateApp().
Sync().
Then().
// stuck in running state
Expect(OperationPhaseIs(OperationRunning)).
When().
And(func() {
_, err := RunCli("app", "wait", Name(), "--suspended")
errors.CheckError(err)
})
}
func TestSyncOptionReplace(t *testing.T) {
Given(t).
Path("config-map").
When().
PatchFile("config-map.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Replace=true"}}]`).
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, "configmap/my-map created", app.Status.OperationState.SyncResult.Resources[0].Message)
}).
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, "configmap/my-map replaced", app.Status.OperationState.SyncResult.Resources[0].Message)
})
}
func TestSyncOptionReplaceFromCLI(t *testing.T) {
Given(t).
Path("config-map").
Replace().
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, "configmap/my-map created", app.Status.OperationState.SyncResult.Resources[0].Message)
}).
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, "configmap/my-map replaced", app.Status.OperationState.SyncResult.Resources[0].Message)
})
}
func TestDiscoverNewCommit(t *testing.T) {
var sha string
Given(t).
Path("config-map").
When().
CreateApp().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
sha = app.Status.Sync.Revision
assert.NotEmpty(t, sha)
}).
When().
PatchFile("config-map.yaml", `[{"op": "replace", "path": "/data/foo", "value": "hello"}]`).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
// make sure new commit is not discovered immediately after push
And(func(app *Application) {
assert.Equal(t, sha, app.Status.Sync.Revision)
}).
When().
// make sure the new commit is discovered after refresh is requested
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.NotEqual(t, sha, app.Status.Sync.Revision)
})
}
nox.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import fnmatch
import os
import tempfile
import nox
try:
import ci_diff_helper
except ImportError:
ci_diff_helper = None
#
# Helpers and utility functions
#
def _list_files(folder, pattern):
"""Lists all files below the given folder that match the pattern."""
for root, folders, files in os.walk(folder):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(root, filename)
def _collect_dirs(
start_dir,
blacklist=set(['conftest.py', 'nox.py', 'lib']),
suffix='_test.py',
recurse_further=False):
"""Recursively collects a list of dirs that contain a file matching the
given suffix.
    This works by listing the contents of directories and finding
    directories that contain files matching the given suffix.
"""
# Collect all the directories that have tests in them.
for parent, subdirs, files in os.walk(start_dir):
        if './.' in parent:
            continue  # Skip top-level hidden directories.
elif any(f for f in files if f.endswith(suffix) and f not in blacklist):
# Don't recurse further for tests, since py.test will do that.
if not recurse_further:
del subdirs[:]
# This dir has desired files in it. yield it.
yield parent
else:
# Filter out dirs we don't want to recurse into
subdirs[:] = [
s for s in subdirs
if s[0].isalpha() and
s not in blacklist]
def _get_changed_files():
"""Returns a list of files changed for this pull request / push.
If running on a public CI like Travis or Circle this is used to only
run tests/lint for changed files.
"""
if not ci_diff_helper:
return None
try:
config = ci_diff_helper.get_config()
except OSError: # Not on CI.
return None
changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)
changed_files = set([
'./{}'.format(filename) for filename in changed_files])
return changed_files
def _filter_samples(sample_dirs, changed_files):
"""Filers the list of sample directories to only include directories that
contain files in the list of changed files."""
result = []
for sample_dir in sample_dirs:
for changed_file in changed_files:
if changed_file.startswith(sample_dir):
result.append(sample_dir)
return list(set(result))
def _determine_local_import_names(start_dir):
"""Determines all import names that should be considered "local".
    This is used when running the linter to ensure that import order is
properly checked.
"""
file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
    return [
        basename
        for basename, extension in file_ext_pairs
        if extension == '.py'
        or (os.path.isdir(os.path.join(start_dir, basename))
            and basename != '__pycache__')]
#
# App Engine specific helpers
#
_GAE_ROOT = os.environ.get('GAE_ROOT')
if _GAE_ROOT is None:
_GAE_ROOT = tempfile.mkdtemp()
def _setup_appengine_sdk(session):
"""Installs the App Engine SDK, if needed."""
session.env['GAE_SDK_PATH'] = os.path.join(_GAE_ROOT, 'google_appengine')
session.run('gcp-devrel-py-tools', 'download-appengine-sdk', _GAE_ROOT)
#
# Test sessions
#
PYTEST_COMMON_ARGS = ['--junitxml=sponge_log.xml']
# Ignore I202 "Additional newline in a section of imports." to accommodate
# region tags in import blocks. Since we specify an explicit ignore, we also
# have to explicitly ignore the list of default ignores:
# `E121,E123,E126,E226,E24,E704,W503,W504` as shown by `flake8 --help`.
FLAKE8_COMMON_ARGS = [
'--show-source', '--builtin', 'gettext', '--max-complexity', '20',
'--import-order-style', 'google',
'--exclude', '.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py',
'--ignore=E121,E123,E126,E226,E24,E704,W503,W504,I202',
]
# Collect sample directories.
ALL_TESTED_SAMPLES = sorted(list(_collect_dirs('.')))
ALL_SAMPLE_DIRECTORIES = sorted(list(_collect_dirs('.', suffix='.py', recurse_further=True)))
GAE_STANDARD_SAMPLES = [
sample for sample in ALL_TESTED_SAMPLES
if sample.startswith('./appengine/standard/')]
PY2_ONLY_SAMPLES = GAE_STANDARD_SAMPLES + [
sample for sample in ALL_TESTED_SAMPLES
if sample.startswith('./composer/workflows')]
PY3_ONLY_SAMPLES = [
sample for sample in ALL_TESTED_SAMPLES
if (sample.startswith('./appengine/standard_python37')
or sample.startswith('./functions/'))]
NON_GAE_STANDARD_SAMPLES_PY2 = sorted(
list((set(ALL_TESTED_SAMPLES) - set(GAE_STANDARD_SAMPLES)) -
set(PY3_ONLY_SAMPLES)))
NON_GAE_STANDARD_SAMPLES_PY3 = sorted(
list(set(ALL_TESTED_SAMPLES) - set(PY2_ONLY_SAMPLES)))
# Filter sample directories if on a CI like Travis or Circle to only run tests
# for changed samples.
CHANGED_FILES = _get_changed_files()
if CHANGED_FILES is not None:
print('Filtering based on changed files.')
ALL_TESTED_SAMPLES = _filter_samples(
ALL_TESTED_SAMPLES, CHANGED_FILES)
ALL_SAMPLE_DIRECTORIES = _filter_samples(
ALL_SAMPLE_DIRECTORIES, CHANGED_FILES)
GAE_STANDARD_SAMPLES = _filter_samples(
GAE_STANDARD_SAMPLES, CHANGED_FILES)
NON_GAE_STANDARD_SAMPLES_PY2 = _filter_samples(
NON_GAE_STANDARD_SAMPLES_PY2, CHANGED_FILES)
NON_GAE_STANDARD_SAMPLES_PY3 = _filter_samples(
NON_GAE_STANDARD_SAMPLES_PY3, CHANGED_FILES)
def _session_tests(session, sample, post_install=None):
"""Runs py.test for a particular sample."""
session.install('-r', 'testing/requirements.txt')
session.chdir(sample)
if os.path.exists(os.path.join(sample, 'requirements.txt')):
session.install('-r', 'requirements.txt')
if post_install:
post_install(session)
session.run(
'pytest',
*(PYTEST_COMMON_ARGS + session.posargs),
# Pytest will return 5 when no tests are collected. This can happen
# on travis where slow and flaky tests are excluded.
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
success_codes=[0, 5])
@nox.parametrize('sample', GAE_STANDARD_SAMPLES)
def session_gae(session, sample):
"""Runs py.test for an App Engine standard sample."""
session.interpreter = 'python2.7'
# Create a lib directory if needed, otherwise the App Engine vendor library
# will complain.
if not os.path.isdir(os.path.join(sample, 'lib')):
os.mkdir(os.path.join(sample, 'lib'))
_session_tests(session, sample, _setup_appengine_sdk)
@nox.parametrize('sample', NON_GAE_STANDARD_SAMPLES_PY2)
def session_py27(session, sample):
"""Runs py.test for a sample using Python 2.7"""
session.interpreter = 'python2.7'
_session_tests(session, sample)
@nox.parametrize('sample', NON_GAE_STANDARD_SAMPLES_PY3)
def session_py36(session, sample):
"""Runs py.test for a sample using Python 3.6"""
session.interpreter = 'python3.6'
_session_tests(session, sample)
@nox.parametrize('sample', ALL_SAMPLE_DIRECTORIES)
def session_lint(session, sample):
"""Runs flake8 on the sample."""
session.install('flake8', 'flake8-import-order')
local_names = _determine_local_import_names(sample)
args = FLAKE8_COMMON_ARGS + [
'--application-import-names', ','.join(local_names),
'.']
session.chdir(sample)
session.run('flake8', *args)
#
# Utility sessions
#
def session_missing_tests(session):
"""Lists all sample directories that do not have tests."""
session.virtualenv = False
print('The following samples do not have tests:')
for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES):
print('* {}'.format(sample))
SAMPLES_WITH_GENERATED_READMES = sorted(
list(_collect_dirs('.', suffix='.rst.in')))
@nox.parametrize('sample', SAMPLES_WITH_GENERATED_READMES)
def session_readmegen(session, sample):
"""(Re-)generates the readme for a sample."""
session.install('jinja2', 'pyyaml')
if os.path.exists(os.path.join(sample, 'requirements.txt')):
session.install('-r', os.path.join(sample, 'requirements.txt'))
in_file = os.path.join(sample, 'README.rst.in')
session.run('python', 'scripts/readme-gen/readme_gen.py', in_file)
def session_check_requirements(session):
"""Checks for out of date requirements and optionally updates them.
This is intentionally not parametric, as it's desired to never have two
samples with differing versions of dependencies.
"""
session.install('-r', 'testing/requirements.txt')
if 'update' in session.posargs:
command = 'update-requirements'
else:
command = 'check-requirements'
reqfiles = list(_list_files('.', 'requirements*.txt'))
for reqfile in reqfiles:
session.run('gcp-devrel-py-tools', command, reqfile)
|
[] |
[] |
[
"GAE_ROOT"
] |
[]
|
["GAE_ROOT"]
|
python
| 1 | 0 | |
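nox.py's _filter_samples drives the only-test-changed-samples behavior on CI. For illustration, a rough Go transcription of the same prefix-match-and-dedup filter; the function and variable names are my own, not part of any upstream code.

package main

import (
	"fmt"
	"strings"
)

// filterSamples keeps only the sample directories that contain at least one
// changed file, matching by path prefix, with each directory listed once.
func filterSamples(sampleDirs, changedFiles []string) []string {
	seen := map[string]bool{}
	var result []string
	for _, dir := range sampleDirs {
		for _, f := range changedFiles {
			if strings.HasPrefix(f, dir) && !seen[dir] {
				seen[dir] = true
				result = append(result, dir)
			}
		}
	}
	return result
}

func main() {
	fmt.Println(filterSamples(
		[]string{"./appengine/standard/", "./functions/"},
		[]string{"./functions/helloworld/main.py"},
	))
	// Output: [./functions/]
}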
train.py
|
import argparse
import os
import infolog
from hparams import hparams
from tacotron.train import tacotron_train
log = infolog.log
def save_seq(file, sequence, input_path):
'''Save Tacotron-2 training state to disk. (To skip for future runs)
'''
sequence = [str(int(s)) for s in sequence] + [input_path]
with open(file, 'w') as f:
f.write('|'.join(sequence))
def read_seq(file):
'''Load Tacotron-2 training state from disk. (To skip if not first run)
'''
if os.path.isfile(file):
with open(file, 'r') as f:
sequence = f.read().split('|')
return [bool(int(s)) for s in sequence[:-1]], sequence[-1]
else:
		return [False, False, False], ''
def prepare_run(args):
modified_hp = hparams.parse(args.hparams)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
#os.environ['CUDA_VISIBLE_DEVICES'] = ''
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-{}'.format(run_name))
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'Terminal_train_log'), run_name, args.slack_url)
return log_dir, modified_hp
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default='')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--tacotron_input', default='training_data/train.txt')
parser.add_argument('--wavenet_input', default='tacotron_output/gta/map.txt')
parser.add_argument('--name', help='Name of logging directory.')
parser.add_argument('--model', default='Tacotron')
	parser.add_argument('--input_dir', default='training_data', help='folder containing input sentences/targets')
	parser.add_argument('--output_dir', default='output', help='folder to contain synthesized mel spectrograms')
parser.add_argument('--mode', default='synthesis', help='mode for synthesis of tacotron after training')
parser.add_argument('--GTA', default='True', help='Ground truth aligned synthesis, defaults to True, only considered in Tacotron synthesis mode')
	# NOTE: argparse's type=bool treats any non-empty string (even 'False') as True,
	# so parse the flag explicitly.
	parser.add_argument('--restore', type=lambda s: s.lower() in ('true', '1', 'yes'), default=True, help='Set this to False to do a fresh training')
parser.add_argument('--summary_interval', type=int, default=250,
help='Steps between running summary ops')
parser.add_argument('--checkpoint_interval', type=int, default=500,
help='Steps between writing checkpoints')
parser.add_argument('--eval_interval', type=int, default=500,
help='Steps between eval on test data')
parser.add_argument('--tacotron_train_steps', type=int, default=200000, help='total number of tacotron training steps')
parser.add_argument('--wavenet_train_steps', type=int, default=1300000, help='total number of wavenet training steps')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--slack_url', default=None, help='slack webhook notification destination link')
args = parser.parse_args()
log_dir, hparams = prepare_run(args)
tacotron_train(args, log_dir, hparams)
if __name__ == '__main__':
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
main()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 3 | 0 | |
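train.py persists its training state as a single '|'-joined line of stage flags plus the input path (save_seq/read_seq). A standalone Go sketch of the same file format, assuming the three stage flags the Python default implies; the names are illustrative.

package main

import (
	"fmt"
	"os"
	"strings"
)

// saveSeq writes the stage flags and input path as one '|'-joined line.
func saveSeq(file string, sequence []bool, inputPath string) error {
	parts := make([]string, 0, len(sequence)+1)
	for _, s := range sequence {
		if s {
			parts = append(parts, "1")
		} else {
			parts = append(parts, "0")
		}
	}
	parts = append(parts, inputPath)
	return os.WriteFile(file, []byte(strings.Join(parts, "|")), 0644)
}

// readSeq loads the flags back, returning defaults on the first run.
func readSeq(file string) ([]bool, string) {
	b, err := os.ReadFile(file)
	if err != nil {
		return []bool{false, false, false}, "" // first run: nothing saved yet
	}
	parts := strings.Split(string(b), "|")
	flags := make([]bool, len(parts)-1)
	for i, p := range parts[:len(parts)-1] {
		flags[i] = p == "1"
	}
	return flags, parts[len(parts)-1]
}

func main() {
	_ = saveSeq("state.txt", []bool{true, false, false}, "training_data/train.txt")
	fmt.Println(readSeq("state.txt"))
}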
cmd/repo-updater/repos/bitbucketserver_test.go
|
package repos
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/internal/campaigns"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/bitbucketserver"
"github.com/sourcegraph/sourcegraph/internal/testutil"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestBitbucketServerSource_MakeRepo(t *testing.T) {
b, err := ioutil.ReadFile(filepath.Join("testdata", "bitbucketserver-repos.json"))
if err != nil {
t.Fatal(err)
}
var repos []*bitbucketserver.Repo
if err := json.Unmarshal(b, &repos); err != nil {
t.Fatal(err)
}
cases := map[string]*schema.BitbucketServerConnection{
"simple": {
Url: "bitbucket.example.com",
Token: "secret",
},
"ssh": {
Url: "https://bitbucket.example.com",
Token: "secret",
InitialRepositoryEnablement: true,
GitURLType: "ssh",
},
"path-pattern": {
Url: "https://bitbucket.example.com",
Token: "secret",
RepositoryPathPattern: "bb/{projectKey}/{repositorySlug}",
},
"username": {
Url: "https://bitbucket.example.com",
Username: "foo",
Token: "secret",
RepositoryPathPattern: "bb/{projectKey}/{repositorySlug}",
},
}
svc := ExternalService{ID: 1, Kind: extsvc.KindBitbucketServer}
for name, config := range cases {
t.Run(name, func(t *testing.T) {
s, err := newBitbucketServerSource(&svc, config, nil)
if err != nil {
t.Fatal(err)
}
var got []*Repo
for _, r := range repos {
got = append(got, s.makeRepo(r, false))
}
path := filepath.Join("testdata", "bitbucketserver-repos-"+name+".golden")
testutil.AssertGolden(t, path, update(name), got)
})
}
}
func TestBitbucketServerSource_Exclude(t *testing.T) {
b, err := ioutil.ReadFile(filepath.Join("testdata", "bitbucketserver-repos.json"))
if err != nil {
t.Fatal(err)
}
var repos []*bitbucketserver.Repo
if err := json.Unmarshal(b, &repos); err != nil {
t.Fatal(err)
}
cases := map[string]*schema.BitbucketServerConnection{
"none": {
Url: "https://bitbucket.example.com",
Token: "secret",
},
"name": {
Url: "https://bitbucket.example.com",
Token: "secret",
Exclude: []*schema.ExcludedBitbucketServerRepo{{
Name: "SG/python-langserver-fork",
}, {
Name: "~KEEGAN/rgp",
}},
},
"id": {
Url: "https://bitbucket.example.com",
Token: "secret",
Exclude: []*schema.ExcludedBitbucketServerRepo{{Id: 4}},
},
"pattern": {
Url: "https://bitbucket.example.com",
Token: "secret",
Exclude: []*schema.ExcludedBitbucketServerRepo{{
Pattern: "SG/python.*",
}, {
Pattern: "~KEEGAN/.*",
}},
},
"both": {
Url: "https://bitbucket.example.com",
Token: "secret",
// We match on the bitbucket server repo name, not the repository path pattern.
RepositoryPathPattern: "bb/{projectKey}/{repositorySlug}",
Exclude: []*schema.ExcludedBitbucketServerRepo{{
Id: 1,
}, {
Name: "~KEEGAN/rgp",
}, {
Pattern: ".*-fork",
}},
},
}
svc := ExternalService{ID: 1, Kind: extsvc.KindBitbucketServer}
for name, config := range cases {
t.Run(name, func(t *testing.T) {
s, err := newBitbucketServerSource(&svc, config, nil)
if err != nil {
t.Fatal(err)
}
type output struct {
Include []string
Exclude []string
}
var got output
for _, r := range repos {
name := r.Slug
if r.Project != nil {
name = r.Project.Key + "/" + name
}
if s.excludes(r) {
got.Exclude = append(got.Exclude, name)
} else {
got.Include = append(got.Include, name)
}
}
path := filepath.Join("testdata", "bitbucketserver-repos-exclude-"+name+".golden")
testutil.AssertGolden(t, path, update(name), got)
})
}
}
func TestBitbucketServerSource_LoadChangesets(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
// The test fixtures and golden files were generated with
// this config pointed to bitbucket.sgdev.org
instanceURL = "https://bitbucket.sgdev.org"
}
repo := &Repo{
Metadata: &bitbucketserver.Repo{
Slug: "vegeta",
Project: &bitbucketserver.Project{Key: "SOUR"},
},
}
changesets := []*Changeset{
{Repo: repo, Changeset: &campaigns.Changeset{ExternalID: "2"}},
{Repo: repo, Changeset: &campaigns.Changeset{ExternalID: "4"}},
{Repo: repo, Changeset: &campaigns.Changeset{ExternalID: "999"}},
}
testCases := []struct {
name string
cs []*Changeset
err string
}{
{
name: "found",
cs: []*Changeset{changesets[0], changesets[1]},
},
{
name: "subset-not-found",
cs: []*Changeset{changesets[0], changesets[2]},
err: `Changeset with external ID "999" not found`,
},
}
for _, tc := range testCases {
tc := tc
tc.name = "BitbucketServerSource_LoadChangesets_" + tc.name
t.Run(tc.name, func(t *testing.T) {
cf, save := newClientFactory(t, tc.name)
defer save(t)
lg := log15.New()
lg.SetHandler(log15.DiscardHandler())
svc := &ExternalService{
Kind: extsvc.KindBitbucketServer,
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: instanceURL,
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
}),
}
bbsSrc, err := NewBitbucketServerSource(svc, cf)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
err = bbsSrc.LoadChangesets(ctx, tc.cs...)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
meta := make([]*bitbucketserver.PullRequest, 0, len(tc.cs))
for _, cs := range tc.cs {
meta = append(meta, cs.Changeset.Metadata.(*bitbucketserver.PullRequest))
}
testutil.AssertGolden(t, "testdata/golden/"+tc.name, update(tc.name), meta)
})
}
}
func TestBitbucketServerSource_CreateChangeset(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
// The test fixtures and golden files were generated with
// this config pointed to bitbucket.sgdev.org
instanceURL = "https://bitbucket.sgdev.org"
}
repo := &Repo{
Metadata: &bitbucketserver.Repo{
Slug: "automation-testing",
Project: &bitbucketserver.Project{Key: "SOUR"},
},
}
testCases := []struct {
name string
cs *Changeset
err string
exists bool
}{
{
name: "abbreviated refs",
cs: &Changeset{
Title: "This is a test PR",
Body: "This is the body of a test PR",
BaseRef: "master",
HeadRef: "test-pr-bbs-11",
Repo: repo,
Changeset: &campaigns.Changeset{},
},
},
{
name: "success",
cs: &Changeset{
Title: "This is a test PR",
Body: "This is the body of a test PR",
BaseRef: "refs/heads/master",
HeadRef: "refs/heads/test-pr-bbs-12",
Repo: repo,
Changeset: &campaigns.Changeset{},
},
},
{
name: "already exists",
cs: &Changeset{
Title: "This is a test PR",
Body: "This is the body of a test PR",
BaseRef: "refs/heads/master",
HeadRef: "refs/heads/always-open-pr-bbs",
Repo: repo,
Changeset: &campaigns.Changeset{},
},
// CreateChangeset is idempotent so if the PR already exists
// it is not an error
err: "",
exists: true,
},
}
for _, tc := range testCases {
tc := tc
tc.name = "BitbucketServerSource_CreateChangeset_" + tc.name
t.Run(tc.name, func(t *testing.T) {
cf, save := newClientFactory(t, tc.name)
defer save(t)
lg := log15.New()
lg.SetHandler(log15.DiscardHandler())
svc := &ExternalService{
Kind: extsvc.KindBitbucketServer,
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: instanceURL,
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
}),
}
bbsSrc, err := NewBitbucketServerSource(svc, cf)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
exists, err := bbsSrc.CreateChangeset(ctx, tc.cs)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
if have, want := exists, tc.exists; have != want {
t.Errorf("exists:\nhave: %t\nwant: %t", have, want)
}
pr := tc.cs.Changeset.Metadata.(*bitbucketserver.PullRequest)
testutil.AssertGolden(t, "testdata/golden/"+tc.name, update(tc.name), pr)
})
}
}
func TestBitbucketServerSource_CloseChangeset(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
// The test fixtures and golden files were generated with
// this config pointed to bitbucket.sgdev.org
instanceURL = "https://bitbucket.sgdev.org"
}
pr := &bitbucketserver.PullRequest{ID: 59, Version: 4}
pr.ToRef.Repository.Slug = "automation-testing"
pr.ToRef.Repository.Project.Key = "SOUR"
testCases := []struct {
name string
cs *Changeset
err string
}{
{
name: "success",
cs: &Changeset{Changeset: &campaigns.Changeset{Metadata: pr}},
},
}
for _, tc := range testCases {
tc := tc
tc.name = "BitbucketServerSource_CloseChangeset_" + strings.Replace(tc.name, " ", "_", -1)
t.Run(tc.name, func(t *testing.T) {
cf, save := newClientFactory(t, tc.name)
defer save(t)
lg := log15.New()
lg.SetHandler(log15.DiscardHandler())
svc := &ExternalService{
Kind: extsvc.KindBitbucketServer,
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: instanceURL,
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
}),
}
bbsSrc, err := NewBitbucketServerSource(svc, cf)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
err = bbsSrc.CloseChangeset(ctx, tc.cs)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
pr := tc.cs.Changeset.Metadata.(*bitbucketserver.PullRequest)
testutil.AssertGolden(t, "testdata/golden/"+tc.name, update(tc.name), pr)
})
}
}
func TestBitbucketServerSource_UpdateChangeset(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
// The test fixtures and golden files were generated with
// this config pointed to bitbucket.sgdev.org
instanceURL = "https://bitbucket.sgdev.org"
}
pr := &bitbucketserver.PullRequest{ID: 43, Version: 5}
pr.ToRef.Repository.Slug = "automation-testing"
pr.ToRef.Repository.Project.Key = "SOUR"
testCases := []struct {
name string
cs *Changeset
err string
}{
{
name: "success",
cs: &Changeset{
Title: "This is a new title",
Body: "This is a new body",
BaseRef: "refs/heads/master",
Changeset: &campaigns.Changeset{Metadata: pr},
},
},
}
for _, tc := range testCases {
tc := tc
tc.name = "BitbucketServerSource_UpdateChangeset_" + strings.Replace(tc.name, " ", "_", -1)
t.Run(tc.name, func(t *testing.T) {
cf, save := newClientFactory(t, tc.name)
defer save(t)
lg := log15.New()
lg.SetHandler(log15.DiscardHandler())
svc := &ExternalService{
Kind: extsvc.KindBitbucketServer,
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: instanceURL,
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
}),
}
bbsSrc, err := NewBitbucketServerSource(svc, cf)
if err != nil {
t.Fatal(err)
}
ctx := context.Background()
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
err = bbsSrc.UpdateChangeset(ctx, tc.cs)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil {
return
}
pr := tc.cs.Changeset.Metadata.(*bitbucketserver.PullRequest)
testutil.AssertGolden(t, "testdata/golden/"+tc.name, update(tc.name), pr)
})
}
}
|
[
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_TOKEN\"",
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_TOKEN\"",
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_TOKEN\"",
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_TOKEN\""
] |
[] |
[
"BITBUCKET_SERVER_URL",
"BITBUCKET_SERVER_TOKEN"
] |
[]
|
["BITBUCKET_SERVER_URL", "BITBUCKET_SERVER_TOKEN"]
|
go
| 2 | 0 | |
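The four changeset tests above read BITBUCKET_SERVER_URL and BITBUCKET_SERVER_TOKEN but can still run against recorded fixtures. As a hedged sketch only: the stricter guard one could put in front of a genuinely live run, where skipping on a missing token is my assumption, not the upstream behavior.

package live

import (
	"os"
	"testing"
)

// TestLiveBitbucketServer gates a live-API test on the same variables the
// suite reads; the real tests instead replay recorded fixtures.
func TestLiveBitbucketServer(t *testing.T) {
	url := os.Getenv("BITBUCKET_SERVER_URL")
	token := os.Getenv("BITBUCKET_SERVER_TOKEN")
	if url == "" || token == "" {
		t.Skip("BITBUCKET_SERVER_URL or BITBUCKET_SERVER_TOKEN not set")
	}
	// ...construct the source with url/token and exercise it here...
}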
backend/pkg/auth/jwt.go
|
package auth
import (
"os"
"time"
jwt "github.com/dgrijalva/jwt-go"
)
var JWTSigningMethod = jwt.SigningMethodHS512
var JWTStandardClaims = &jwt.StandardClaims{
Issuer: "Velocity CI",
}
// Audience constants
const (
AudienceUser = "user"
AudienceBuilder = "builder"
)
func NewJWT(expiryDuration time.Duration, audience, subject string) (string, time.Time) {
	now := time.Now()
	expires := now.Add(expiryDuration)
	// Copy the shared template so concurrent calls do not mutate the global claims.
	claims := *JWTStandardClaims
	claims.ExpiresAt = expires.Unix()
	claims.NotBefore = now.Unix()
	claims.IssuedAt = now.Unix()
	claims.Subject = subject
	claims.Audience = audience
	token := jwt.NewWithClaims(JWTSigningMethod, &claims)
	// The signing error is discarded; an empty token string is returned on failure.
	tokenString, _ := token.SignedString([]byte(os.Getenv("JWT_SECRET")))
	return tokenString, expires
}
|
[
"\"JWT_SECRET\""
] |
[] |
[
"JWT_SECRET"
] |
[]
|
["JWT_SECRET"]
|
go
| 1 | 0 | |
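NewJWT only covers the signing side. A minimal sketch of the matching verification with the same HS512 method and JWT_SECRET; parseToken is a hypothetical helper, not part of the auth package above, and the hard-coded secret is a stand-in for the real environment value.

package main

import (
	"fmt"
	"os"

	jwt "github.com/dgrijalva/jwt-go"
)

// parseToken validates the signature and standard claims of a token
// produced with SigningMethodHS512 and the JWT_SECRET environment variable.
func parseToken(tokenString string) (*jwt.StandardClaims, error) {
	claims := &jwt.StandardClaims{}
	_, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
		// Reject tokens signed with an unexpected method.
		if t.Method != jwt.SigningMethodHS512 {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte(os.Getenv("JWT_SECRET")), nil
	})
	return claims, err
}

func main() {
	os.Setenv("JWT_SECRET", "example-secret") // stand-in; normally set in the environment
	token := jwt.NewWithClaims(jwt.SigningMethodHS512, &jwt.StandardClaims{Subject: "alice"})
	signed, _ := token.SignedString([]byte(os.Getenv("JWT_SECRET")))
	claims, err := parseToken(signed)
	fmt.Println(claims.Subject, err) // alice <nil>
}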
cmd/cp-main.go
|
/*
* MinIO Client (C) 2014-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/fatih/color"
jsoniter "github.com/json-iterator/go"
"github.com/minio/cli"
json "github.com/minio/mc/pkg/colorjson"
"github.com/minio/mc/pkg/probe"
"github.com/minio/minio/pkg/console"
)
// cp command flags.
var (
cpFlags = []cli.Flag{
cli.StringFlag{
Name: "rewind",
Usage: "roll back object(s) to current version at specified time",
},
cli.StringFlag{
Name: "version-id, vid",
Usage: "select an object version to copy",
},
cli.BoolFlag{
Name: "recursive, r",
Usage: "copy recursively",
},
cli.StringFlag{
Name: "older-than",
Usage: "copy objects older than L days, M hours and N minutes",
},
cli.StringFlag{
Name: "newer-than",
Usage: "copy objects newer than L days, M hours and N minutes",
},
cli.StringFlag{
Name: "storage-class, sc",
Usage: "set storage class for new object(s) on target",
},
cli.StringFlag{
Name: "encrypt",
Usage: "encrypt/decrypt objects (using server-side encryption with server managed keys)",
},
cli.StringFlag{
Name: "attr",
Usage: "add custom metadata for the object",
},
cli.BoolFlag{
Name: "continue, c",
Usage: "create or resume copy session",
},
cli.BoolFlag{
Name: "preserve, a",
Usage: "preserve filesystem attributes (mode, ownership, timestamps)",
},
cli.BoolFlag{
Name: "disable-multipart",
Usage: "disable multipart upload feature",
},
cli.BoolFlag{
Name: "md5",
Usage: "force all upload(s) to calculate md5sum checksum",
},
cli.StringFlag{
Name: rmFlag,
Usage: "retention mode to be applied on the object (governance, compliance)",
},
cli.StringFlag{
Name: rdFlag,
Usage: "retention duration for the object in d days or y years",
},
cli.StringFlag{
Name: lhFlag,
Usage: "apply legal hold to the copied object (on, off)",
},
}
)
var rmFlag = "retention-mode"
var rdFlag = "retention-duration"
var lhFlag = "legal-hold"
// ErrInvalidMetadata reflects invalid metadata format
var ErrInvalidMetadata = errors.New("specified metadata should be of form key1=value1;key2=value2;... and so on")
// Copy command.
var cpCmd = cli.Command{
Name: "cp",
Usage: "copy objects",
Action: mainCopy,
OnUsageError: onUsageError,
Before: setGlobalsFromContext,
Flags: append(append(cpFlags, ioFlags...), globalFlags...),
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} [FLAGS] SOURCE [SOURCE...] TARGET
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}
ENVIRONMENT VARIABLES:
MC_ENCRYPT: list of comma delimited prefixes
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
EXAMPLES:
01. Copy a list of objects from local file system to Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} Music/*.ogg s3/jukebox/
02. Copy a folder recursively from MinIO cloud storage to Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} --recursive play/mybucket/burningman2011/ s3/mybucket/
03. Copy multiple local folders recursively to MinIO cloud storage.
{{.Prompt}} {{.HelpName}} --recursive backup/2014/ backup/2015/ play/archive/
04. Copy a bucket recursively from aliased Amazon S3 cloud storage to local filesystem on Windows.
{{.Prompt}} {{.HelpName}} --recursive s3\documents\2014\ C:\Backups\2014
05. Copy files older than 7 days and 10 hours from MinIO cloud storage to Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} --older-than 7d10h play/mybucket/burningman2011/ s3/mybucket/
06. Copy files newer than 7 days and 10 hours from MinIO cloud storage to a local path.
{{.Prompt}} {{.HelpName}} --newer-than 7d10h play/mybucket/burningman2011/ ~/latest/
07. Copy an object with name containing unicode characters to Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} 本語 s3/andoria/
08. Copy a local folder with space separated characters to Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} --recursive 'workdir/documents/May 2014/' s3/miniocloud
09. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage.
{{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/ myminio/documents/
10. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. In case the encryption key contains non-printable character like tab, pass the
base64 encoded string as key.
{{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,myminio/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" s3/documents/ myminio/documents/
11. Copy a list of objects from local file system to MinIO cloud storage with specified metadata, separated by ";"
{{.Prompt}} {{.HelpName}} --attr "key1=value1;key2=value2" Music/*.mp4 play/mybucket/
12. Copy a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with Cache-Control and custom metadata, separated by ";".
{{.Prompt}} {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000;key1=value1;key2=value2" --recursive play/mybucket/burningman2011/ s3/mybucket/
13. Copy a text file to an object storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object.
{{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket
14. Copy a text file to an object storage and create or resume copy session.
{{.Prompt}} {{.HelpName}} --recursive --continue dir/ play/mybucket
15. Copy a text file to an object storage and preserve the file system attribute as metadata.
{{.Prompt}} {{.HelpName}} -a myobject.txt play/mybucket
16. Copy a text file to an object storage with object lock mode set to 'GOVERNANCE' with retention duration 1 day.
{{.Prompt}} {{.HelpName}} --retention-mode governance --retention-duration 1d locked.txt play/locked-bucket/
17. Copy a text file to an object storage with legal-hold enabled.
{{.Prompt}} {{.HelpName}} --legal-hold on locked.txt play/locked-bucket/
18. Copy a text file to an object storage and disable multipart upload feature.
{{.Prompt}} {{.HelpName}} --disable-multipart myobject.txt play/mybucket
19. Roll back 10 days in the past to copy the content of 'mybucket'
{{.Prompt}} {{.HelpName}} --rewind 10d -r play/mybucket/ /tmp/dest/
`,
}
// copyMessage container for file copy messages
type copyMessage struct {
Status string `json:"status"`
Source string `json:"source"`
Target string `json:"target"`
Size int64 `json:"size"`
TotalCount int64 `json:"totalCount"`
TotalSize int64 `json:"totalSize"`
}
// String colorized copy message
func (c copyMessage) String() string {
return console.Colorize("Copy", fmt.Sprintf("`%s` -> `%s`", c.Source, c.Target))
}
// JSON jsonified copy message
func (c copyMessage) JSON() string {
c.Status = "success"
copyMessageBytes, e := json.MarshalIndent(c, "", " ")
fatalIf(probe.NewError(e), "Unable to marshal into JSON.")
return string(copyMessageBytes)
}
// Progress - an interface which describes current amount
// of data written.
type Progress interface {
Get() int64
SetTotal(int64)
}
// ProgressReader can be used to update the progress of
// an on-going transfer progress.
type ProgressReader interface {
io.Reader
Progress
}
// doCopy - Copy a single file from source to destination
func doCopy(ctx context.Context, cpURLs URLs, pg ProgressReader, encKeyDB map[string][]prefixSSEPair, isMvCmd bool, preserve bool) URLs {
if cpURLs.Error != nil {
cpURLs.Error = cpURLs.Error.Trace()
return cpURLs
}
sourceAlias := cpURLs.SourceAlias
sourceURL := cpURLs.SourceContent.URL
targetAlias := cpURLs.TargetAlias
targetURL := cpURLs.TargetContent.URL
length := cpURLs.SourceContent.Size
sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceURL.Path))
if progressReader, ok := pg.(*progressBar); ok {
progressReader.SetCaption(cpURLs.SourceContent.URL.String() + ": ")
} else {
targetPath := filepath.ToSlash(filepath.Join(targetAlias, targetURL.Path))
printMsg(copyMessage{
Source: sourcePath,
Target: targetPath,
Size: length,
TotalCount: cpURLs.TotalCount,
TotalSize: cpURLs.TotalSize,
})
}
urls := uploadSourceToTargetURL(ctx, cpURLs, pg, encKeyDB, preserve)
if isMvCmd && urls.Error == nil {
rmManager.add(ctx, sourceAlias, sourceURL.String())
}
return urls
}
// doCopyFake - Perform a fake copy to update the progress bar appropriately.
func doCopyFake(ctx context.Context, cpURLs URLs, pg Progress) URLs {
if progressReader, ok := pg.(*progressBar); ok {
progressReader.ProgressBar.Add64(cpURLs.SourceContent.Size)
}
return cpURLs
}
// doPrepareCopyURLs scans the source URL and prepares a list of objects for copying.
func doPrepareCopyURLs(ctx context.Context, session *sessionV8, cancelCopy context.CancelFunc) (totalBytes, totalObjects int64) {
// Separate source and target. 'cp' can take only one target,
// but any number of sources.
sourceURLs := session.Header.CommandArgs[:len(session.Header.CommandArgs)-1]
targetURL := session.Header.CommandArgs[len(session.Header.CommandArgs)-1] // Last one is target
// Access recursive flag inside the session header.
isRecursive := session.Header.CommandBoolFlags["recursive"]
rewind := session.Header.CommandStringFlags["rewind"]
versionID := session.Header.CommandStringFlags["version-id"]
olderThan := session.Header.CommandStringFlags["older-than"]
newerThan := session.Header.CommandStringFlags["newer-than"]
encryptKeys := session.Header.CommandStringFlags["encrypt-key"]
encrypt := session.Header.CommandStringFlags["encrypt"]
encKeyDB, err := parseAndValidateEncryptionKeys(encryptKeys, encrypt)
fatalIf(err, "Unable to parse encryption keys.")
// Create a session data file to store the processed URLs.
dataFP := session.NewDataWriter()
var scanBar scanBarFunc
if !globalQuiet && !globalJSON { // set up progress bar
scanBar = scanBarFactory()
}
URLsCh := prepareCopyURLs(ctx, sourceURLs, targetURL, isRecursive, encKeyDB, olderThan, newerThan, parseRewindFlag(rewind), versionID)
done := false
for !done {
select {
case cpURLs, ok := <-URLsCh:
if !ok { // Done with URL preparation
done = true
break
}
if cpURLs.Error != nil {
// Print in new line and adjust to top so that we don't print over the ongoing scan bar
if !globalQuiet && !globalJSON {
console.Eraseline()
}
if strings.Contains(cpURLs.Error.ToGoError().Error(), " is a folder.") {
errorIf(cpURLs.Error.Trace(), "Folder cannot be copied. Please use `...` suffix.")
} else {
errorIf(cpURLs.Error.Trace(), "Unable to prepare URL for copying.")
}
break
}
var jsoniter = jsoniter.ConfigCompatibleWithStandardLibrary
jsonData, e := jsoniter.Marshal(cpURLs)
if e != nil {
session.Delete()
fatalIf(probe.NewError(e), "Unable to prepare URL for copying. Error in JSON marshaling.")
}
dataFP.Write(jsonData)
dataFP.Write([]byte{'\n'})
if !globalQuiet && !globalJSON {
scanBar(cpURLs.SourceContent.URL.String())
}
totalBytes += cpURLs.SourceContent.Size
totalObjects++
case <-globalContext.Done():
cancelCopy()
// Print in new line and adjust to top so that we don't print over the ongoing scan bar
if !globalQuiet && !globalJSON {
console.Eraseline()
}
session.Delete() // If we are interrupted during the URL scanning, we drop the session.
os.Exit(0)
}
}
session.Header.TotalBytes = totalBytes
session.Header.TotalObjects = totalObjects
session.Save()
return
}
func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli.Context, session *sessionV8, encKeyDB map[string][]prefixSSEPair, isMvCmd bool) error {
var isCopied func(string) bool
var totalObjects, totalBytes int64
var cpURLsCh = make(chan URLs, 10000)
// Store a progress bar or an accounter
var pg ProgressReader
// Enable progress bar reader only during default mode.
if !globalQuiet && !globalJSON { // set up progress bar
pg = newProgressBar(totalBytes)
} else {
pg = newAccounter(totalBytes)
}
sourceURLs := cli.Args()[:len(cli.Args())-1]
targetURL := cli.Args()[len(cli.Args())-1] // Last one is target
tgtClnt, err := newClient(targetURL)
fatalIf(err, "Unable to initialize `"+targetURL+"`.")
// Check if the target bucket has object locking enabled
var withLock bool
if _, _, _, _, err = tgtClnt.GetObjectLockConfig(ctx); err == nil {
withLock = true
}
if session != nil {
// isCopied returns true if an object has been already copied
// or not. This is useful when we resume from a session.
isCopied = isLastFactory(session.Header.LastCopied)
if !session.HasData() {
totalBytes, totalObjects = doPrepareCopyURLs(ctx, session, cancelCopy)
} else {
totalBytes, totalObjects = session.Header.TotalBytes, session.Header.TotalObjects
}
pg.SetTotal(totalBytes)
go func() {
var jsoniter = jsoniter.ConfigCompatibleWithStandardLibrary
// Prepare URL scanner from session data file.
urlScanner := bufio.NewScanner(session.NewDataReader())
for {
if !urlScanner.Scan() || urlScanner.Err() != nil {
close(cpURLsCh)
break
}
var cpURLs URLs
if e := jsoniter.Unmarshal([]byte(urlScanner.Text()), &cpURLs); e != nil {
errorIf(probe.NewError(e), "Unable to unmarshal %s", urlScanner.Text())
continue
}
cpURLsCh <- cpURLs
}
}()
} else {
// Access recursive flag inside the session header.
isRecursive := cli.Bool("recursive")
olderThan := cli.String("older-than")
newerThan := cli.String("newer-than")
rewind := cli.String("rewind")
versionID := cli.String("version-id")
go func() {
totalBytes := int64(0)
for cpURLs := range prepareCopyURLs(ctx, sourceURLs, targetURL, isRecursive,
encKeyDB, olderThan, newerThan, parseRewindFlag(rewind), versionID) {
if cpURLs.Error != nil {
// Print in new line and adjust to top so that we
// don't print over the ongoing scan bar
if !globalQuiet && !globalJSON {
console.Eraseline()
}
if strings.Contains(cpURLs.Error.ToGoError().Error(),
" is a folder.") {
errorIf(cpURLs.Error.Trace(),
"Folder cannot be copied. Please use `...` suffix.")
} else {
errorIf(cpURLs.Error.Trace(),
"Unable to start copying.")
}
break
} else {
totalBytes += cpURLs.SourceContent.Size
pg.SetTotal(totalBytes)
totalObjects++
}
cpURLsCh <- cpURLs
}
close(cpURLsCh)
}()
}
var quitCh = make(chan struct{})
var statusCh = make(chan URLs)
parallel := newParallelManager(statusCh)
go func() {
gracefulStop := func() {
parallel.stopAndWait()
close(statusCh)
}
for {
select {
case <-quitCh:
gracefulStop()
return
case cpURLs, ok := <-cpURLsCh:
if !ok {
gracefulStop()
return
}
// Save total count.
cpURLs.TotalCount = totalObjects
// Save totalSize.
cpURLs.TotalSize = totalBytes
// Initialize target metadata.
cpURLs.TargetContent.Metadata = make(map[string]string)
// Initialize target user metadata.
cpURLs.TargetContent.UserMetadata = make(map[string]string)
// Check and handle storage class if passed in command line args
if storageClass := cli.String("storage-class"); storageClass != "" {
cpURLs.TargetContent.Metadata["X-Amz-Storage-Class"] = storageClass
}
// update Object retention related fields
if session != nil {
cpURLs.TargetContent.RetentionMode = session.Header.CommandStringFlags[rmFlag]
if cpURLs.TargetContent.RetentionMode != "" {
cpURLs.TargetContent.RetentionEnabled = true
}
cpURLs.TargetContent.RetentionDuration = session.Header.CommandStringFlags[rdFlag]
cpURLs.TargetContent.LegalHold = strings.ToUpper(session.Header.CommandStringFlags[lhFlag])
if cpURLs.TargetContent.LegalHold != "" {
cpURLs.TargetContent.LegalHoldEnabled = true
}
} else {
if rm := cli.String(rmFlag); rm != "" {
cpURLs.TargetContent.RetentionMode = rm
cpURLs.TargetContent.RetentionEnabled = true
}
if rd := cli.String(rdFlag); rd != "" {
cpURLs.TargetContent.RetentionDuration = rd
}
if lh := cli.String(lhFlag); lh != "" {
cpURLs.TargetContent.LegalHold = strings.ToUpper(lh)
cpURLs.TargetContent.LegalHoldEnabled = true
}
}
preserve := cli.Bool("preserve")
if cli.String("attr") != "" {
userMetaMap, _ := getMetaDataEntry(cli.String("attr"))
for metadataKey, metaDataVal := range userMetaMap {
cpURLs.TargetContent.UserMetadata[metadataKey] = metaDataVal
}
}
cpURLs.MD5 = cli.Bool("md5") || withLock
cpURLs.DisableMultipart = cli.Bool("disable-multipart")
// Verify if previously copied, notify progress bar.
if isCopied != nil && isCopied(cpURLs.SourceContent.URL.String()) {
parallel.queueTask(func() URLs {
return doCopyFake(ctx, cpURLs, pg)
})
} else {
parallel.queueTask(func() URLs {
return doCopy(ctx, cpURLs, pg, encKeyDB, isMvCmd, preserve)
})
}
}
}
}()
var retErr error
errSeen := false
cpAllFilesErr := true
loop:
for {
select {
case <-globalContext.Done():
close(quitCh)
cancelCopy()
// Receive interrupt notification.
if !globalQuiet && !globalJSON {
console.Eraseline()
}
if session != nil {
session.CloseAndDie()
}
break loop
case cpURLs, ok := <-statusCh:
// Status channel is closed, we should return.
if !ok {
break loop
}
if cpURLs.Error == nil {
if session != nil {
session.Header.LastCopied = cpURLs.SourceContent.URL.String()
session.Save()
}
cpAllFilesErr = false
} else {
// Set exit status for any copy error
retErr = exitStatus(globalErrorExitStatus)
// Print in new line and adjust to top so that we
// don't print over the ongoing progress bar.
if !globalQuiet && !globalJSON {
console.Eraseline()
}
errorIf(cpURLs.Error.Trace(cpURLs.SourceContent.URL.String()),
fmt.Sprintf("Failed to copy `%s`.", cpURLs.SourceContent.URL.String()))
if isErrIgnored(cpURLs.Error) {
cpAllFilesErr = false
continue loop
}
errSeen = true
if progressReader, pgok := pg.(*progressBar); pgok {
if progressReader.ProgressBar.Get() > 0 {
writeContSize := (int)(cpURLs.SourceContent.Size)
totalPGSize := (int)(progressReader.ProgressBar.Total)
written := (int)(progressReader.ProgressBar.Get())
if totalPGSize > writeContSize && written > writeContSize {
progressReader.ProgressBar.Set((written - writeContSize))
progressReader.ProgressBar.Update()
}
}
}
if session != nil {
// For critical errors we should exit. Session
// can be resumed after the user figures out
// the problem.
session.copyCloseAndDie(session.Header.CommandBoolFlags["session"])
}
}
}
}
if progressReader, ok := pg.(*progressBar); ok {
if (errSeen && totalObjects == 1) || (cpAllFilesErr && totalObjects > 1) {
console.Eraseline()
} else if progressReader.ProgressBar.Get() > 0 {
progressReader.ProgressBar.Finish()
}
} else {
if accntReader, ok := pg.(*accounter); ok {
printMsg(accntReader.Stat())
}
}
return retErr
}
// validate the passed metadataString and populate the map
func getMetaDataEntry(metadataString string) (map[string]string, *probe.Error) {
metaDataMap := make(map[string]string)
r := strings.NewReader(metadataString)
type pToken int
const (
KEY pToken = iota
VALUE
)
type pState int
const (
NORMAL pState = iota
QSTRING
DQSTRING
)
var key, value strings.Builder
writeRune := func(ch rune, pt pToken) {
if pt == KEY {
key.WriteRune(ch)
} else if pt == VALUE {
value.WriteRune(ch)
} else {
panic("Invalid parser token type")
}
}
ps := NORMAL
pt := KEY
p := 0
for ; ; p++ {
ch, _, err := r.ReadRune()
if err != nil {
//eof
if ps == QSTRING || ps == DQSTRING || pt == KEY {
return nil, probe.NewError(ErrInvalidMetadata)
}
metaDataMap[http.CanonicalHeaderKey(key.String())] = value.String()
return metaDataMap, nil
}
if ch == '"' {
if ps == DQSTRING {
ps = NORMAL
} else if ps == QSTRING {
writeRune(ch, pt)
} else if ps == NORMAL {
ps = DQSTRING
} else {
break
}
continue
}
if ch == '\'' {
if ps == QSTRING {
ps = NORMAL
} else if ps == DQSTRING {
writeRune(ch, pt)
} else if ps == NORMAL {
ps = QSTRING
} else {
break
}
continue
}
if ch == '=' {
if ps == QSTRING || ps == DQSTRING {
writeRune(ch, pt)
} else if pt == KEY {
pt = VALUE
} else if pt == VALUE {
writeRune(ch, pt)
} else {
break
}
continue
}
if ch == ';' {
if ps == QSTRING || ps == DQSTRING {
writeRune(ch, pt)
} else if pt == KEY {
return nil, probe.NewError(ErrInvalidMetadata)
} else if pt == VALUE {
metaDataMap[http.CanonicalHeaderKey(key.String())] = value.String()
key.Reset()
value.Reset()
pt = KEY
} else {
break
}
continue
}
writeRune(ch, pt)
}
fatalErr := fmt.Sprintf("Invalid parser state at index: %d", p)
panic(fatalErr)
}
// mainCopy is the entry point for cp command.
func mainCopy(cliCtx *cli.Context) error {
ctx, cancelCopy := context.WithCancel(globalContext)
defer cancelCopy()
// Parse encryption keys per command.
encKeyDB, err := getEncKeys(cliCtx)
fatalIf(err, "Unable to parse encryption keys.")
// Parse metadata.
userMetaMap := make(map[string]string)
if cliCtx.String("attr") != "" {
userMetaMap, err = getMetaDataEntry(cliCtx.String("attr"))
fatalIf(err, "Unable to parse attribute %v", cliCtx.String("attr"))
}
// check 'copy' cli arguments.
checkCopySyntax(ctx, cliCtx, encKeyDB, false)
// Additional command specific theme customization.
console.SetColor("Copy", color.New(color.FgGreen, color.Bold))
recursive := cliCtx.Bool("recursive")
rewind := cliCtx.String("rewind")
versionID := cliCtx.String("version-id")
olderThan := cliCtx.String("older-than")
newerThan := cliCtx.String("newer-than")
storageClass := cliCtx.String("storage-class")
retentionMode := cliCtx.String(rmFlag)
retentionDuration := cliCtx.String(rdFlag)
legalHold := strings.ToUpper(cliCtx.String(lhFlag))
sseKeys := os.Getenv("MC_ENCRYPT_KEY")
if key := cliCtx.String("encrypt-key"); key != "" {
sseKeys = key
}
if sseKeys != "" {
sseKeys, err = getDecodedKey(sseKeys)
fatalIf(err, "Unable to parse encryption keys.")
}
sse := cliCtx.String("encrypt")
var session *sessionV8
if cliCtx.Bool("continue") {
sessionID := getHash("cp", cliCtx.Args())
if isSessionExists(sessionID) {
session, err = loadSessionV8(sessionID)
fatalIf(err.Trace(sessionID), "Unable to load session.")
} else {
session = newSessionV8(sessionID)
session.Header.CommandType = "cp"
session.Header.CommandBoolFlags["recursive"] = recursive
session.Header.CommandStringFlags["rewind"] = rewind
session.Header.CommandStringFlags["version-id"] = versionID
session.Header.CommandStringFlags["older-than"] = olderThan
session.Header.CommandStringFlags["newer-than"] = newerThan
session.Header.CommandStringFlags["storage-class"] = storageClass
session.Header.CommandStringFlags[rmFlag] = retentionMode
session.Header.CommandStringFlags[rdFlag] = retentionDuration
session.Header.CommandStringFlags[lhFlag] = legalHold
session.Header.CommandStringFlags["encrypt-key"] = sseKeys
session.Header.CommandStringFlags["encrypt"] = sse
session.Header.CommandBoolFlags["session"] = cliCtx.Bool("continue")
if cliCtx.Bool("preserve") {
session.Header.CommandBoolFlags["preserve"] = cliCtx.Bool("preserve")
}
session.Header.UserMetaData = userMetaMap
session.Header.CommandBoolFlags["md5"] = cliCtx.Bool("md5")
session.Header.CommandBoolFlags["disable-multipart"] = cliCtx.Bool("disable-multipart")
var e error
if session.Header.RootPath, e = os.Getwd(); e != nil {
session.Delete()
fatalIf(probe.NewError(e), "Unable to get current working folder.")
}
// extract URLs.
session.Header.CommandArgs = cliCtx.Args()
}
}
e := doCopySession(ctx, cancelCopy, cliCtx, session, encKeyDB, false)
if session != nil {
session.Delete()
}
return e
}
|
[
"\"MC_ENCRYPT_KEY\""
] |
[] |
[
"MC_ENCRYPT_KEY"
] |
[]
|
["MC_ENCRYPT_KEY"]
|
go
| 1 | 0 | |
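getMetaDataEntry above is a small state machine so that quoted values can contain '=' and ';'. For the common unquoted case, a deliberately simplified Go sketch of what it produces; this version skips the quoting rules on purpose and is not a drop-in replacement for the real parser.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// parseAttrs handles the plain "key1=value1;key2=value2" form, canonicalizing
// header keys as the real parser does. Quoted values are not supported here.
func parseAttrs(s string) (map[string]string, error) {
	m := make(map[string]string)
	for _, pair := range strings.Split(s, ";") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 || kv[0] == "" {
			return nil, fmt.Errorf("metadata should be of form key1=value1;key2=value2;...")
		}
		m[http.CanonicalHeaderKey(kv[0])] = kv[1]
	}
	return m, nil
}

func main() {
	// '=' inside a value survives, since only the first '=' splits the pair.
	m, err := parseAttrs("Cache-Control=max-age=90000;key1=value1")
	fmt.Println(m, err)
}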
dpaf/main.py
|
#!/usr/bin/env python
"""Author: Thomas Vachon"""
import subprocess
import json
import os
import re
import sys
from functools import partial
from fuzzywuzzy import fuzz
os.environ["KIVY_NO_CONSOLELOG"] = "1"
os.environ["KIVY_NO_ARGS"] = "1"
import kivy
kivy.require('1.10.0')
from kivy.config import Config
Config.set('graphics', 'width', '400')
Config.set('graphics', 'height', '350')
from kivy.app import App
from kivy.logger import Logger
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.core.window import Window
OP_COMMAND = '/usr/local/bin/op'
CONFIDENCE_THRESHOLD = 45
class LoginScreen(Screen):
def test_login(self, master_password):
#Window.size = (150, 75)
self.token = ObjectProperty()
try:
            Logger.debug('Signing in to the 1Password CLI')
p = subprocess.Popen([OP_COMMAND, "signin", "my", "--output=raw"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
token_raw, errs = p.communicate(str.encode(master_password))
self.token = token_raw.decode("utf-8").rstrip()
if len(self.token) <= 5:
Logger.debug("Password is invalid")
close_button = Button(text='Exit')
popup = Popup(title='Incorrect Password',
content=close_button,
auto_dismiss=False)
close_button.bind(on_press=self.handle_error)
popup.open()
Logger.debug("Token is: {}".format(self.token))
except subprocess.CalledProcessError as err:
Logger.debug('Error: {}'.format(err))
Logger.debug('Error calling login')
close_button = Button(text='Exit')
popup = Popup(title='Error logging into 1Password CLI',
content=close_button,
auto_dismiss=False)
close_button.bind(on_press=self.handle_error)
popup.open()
if re.match('Invalid', self.token):
Logger.debug('Password was incorrect')
close_button = Button(text='Exit')
popup = Popup(title='Master Password was incorrect',
content=close_button,
auto_dismiss=False)
close_button.bind(on_press=self.handle_error)
popup.open()
try:
Logger.debug("Testing token value via op with token: {}".format(self.token))
p = subprocess.Popen([OP_COMMAND,
"list",
"vaults",
"--session=" + self.token],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
Logger.debug("OP Login was: {}".format(out.decode("utf-8").rstrip()))
except subprocess.CalledProcessError:
Logger.debug('Unable to log into 1pass cli using token')
close_button = Button(text='Exit')
popup = Popup(title='Error Listing 1Password Vaults - Run "op signin my" then try again',
content=close_button,
auto_dismiss=False)
close_button.bind(on_press=self.handle_error)
popup.open()
self.op_get_list(token=self.token)
presentation.current = 'search'
def handle_error(self, *args, **kwargs):
        App.get_running_app().stop()
def op_get_list(self, token):
"""Function to get the JSON list of items from OP"""
p = subprocess.Popen([OP_COMMAND, "list", "items", "--session=" + token],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
op_items_raw, err = p.communicate()
#op_items_raw = TEST_JSON
try:
self.json_data = ObjectProperty()
self.json_data = json.loads(op_items_raw.rstrip())
except json.JSONDecodeError as err:
print_err("Failed to pase JSON: {0}".format(err))
class SearchScreen(Screen):
def op_search(self, user_filter):
"""Function to fuzzy search the OP JSON Structures"""
Logger.debug('Search screen presented')
#Window.size = (300, 300)
possible_matches = []
for item in self.parent.get_screen(name='login').json_data:
search_field = item['overview']['title']
if fuzz.ratio(user_filter, search_field) >= CONFIDENCE_THRESHOLD:
possible_matches.append(search_field)
self.matches = ObjectProperty()
self.matches = possible_matches
if len(possible_matches) > 1:
presentation.current = 'select'
class SelectScreen(Screen):
def user_select(self):
"""Function to have the user select from multiple close matches"""
Logger.debug('Select Screen presented')
#Window.size = (350,400)
matches = self.parent.get_screen(name='search').matches
for match in matches:
button = Button(text=match, id=match)
button.bind(on_press=partial(self.button_select, match))
self.ids.grid.add_widget(button)
def button_select(self, *args):
title = args[0]
app = App.get_running_app()
token = self.parent.get_screen(name='login').token
app.get_password(title=title, token=token)
presentation = Builder.load_file("main.kv")
def print_err(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class OpCli(App):
def build(self):
self.title = 'iTerm OP'
return presentation
def get_password(self, title, token):
Logger.debug('Get Password running')
password = self.do_get_password(entry=title, token=token)
self.return_password(password)
def do_get_password(self, entry, token):
        Logger.debug('do_get_password running')
p = subprocess.Popen([OP_COMMAND, "get", "item", entry,
"--session=" + token], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
item_details_raw, errs = p.communicate()
try:
item_details = json.loads(item_details_raw.decode("utf-8").rstrip())
for field in item_details['details']['fields']:
if field['name'] == 'password' or field['designation'] == 'password':
password = field['value']
return password
except json.JSONDecodeError:
print_err("Unable to parse details!")
sys.exit(2)
def return_password(self, password):
Logger.debug('Returning password')
sys.stdout.write("{}\n".format(password))
sys.stdout.flush()
sys.exit(0)
if __name__ == '__main__':
OpCli().run()
|
[] |
[] |
[
"KIVY_NO_ARGS",
"KIVY_NO_CONSOLELOG"
] |
[]
|
["KIVY_NO_ARGS", "KIVY_NO_CONSOLELOG"]
|
python
| 2 | 0 | |
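The Kivy app's login flow is just a subprocess handshake: feed the master password to `op signin my --output=raw` on stdin and read the session token from stdout. A standalone Go sketch of the same handshake, assuming the CLI lives at the path the Python file hard-codes; the example password is obviously a placeholder.

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// opSignin writes the master password to op's stdin and returns the trimmed
// session token printed on stdout, mirroring LoginScreen.test_login above.
func opSignin(masterPassword string) (string, error) {
	cmd := exec.Command("/usr/local/bin/op", "signin", "my", "--output=raw")
	cmd.Stdin = strings.NewReader(masterPassword)
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return "", err
	}
	return strings.TrimSpace(out.String()), nil
}

func main() {
	token, err := opSignin("correct horse battery staple")
	fmt.Println(token, err)
}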
worker.go
|
package main
import (
log "github.com/Sirupsen/logrus"
"github.com/jrallison/go-workers"
"os"
)
func main() {
redisNamespace := os.Getenv("REDIS_NAMESPACE")
redisServer := os.Getenv("REDIS_SERVER")
redisDB := os.Getenv("REDIS_DB")
redisPool := os.Getenv("REDIS_POOL")
redisPwd := os.Getenv("REDIS_PWD")
if (redisServer == "") || (redisDB == "") || (redisPool == "") || (redisNamespace == "") {
log.Error("Please Start Worker with Required Arguments")
return
}
workers.Configure(map[string]string{
// location of redis instance
"namespace": redisNamespace,
"server": redisServer,
// instance of the database
"database": redisDB,
// redis password
"password": redisPwd,
// number of connections to keep open with redis
"pool": redisPool,
// unique process id for this instance of workers (for proper recovery of inprogress jobs on crash)
"process": "1",
})
// pull messages from "myqueue" with concurrency of 10
workers.Process("sendemail", SendEmail, 10)
//workers.Process("sendbatch", SendBatch, 10)
	// stats will be available at http://localhost:5000/stats
	go workers.StatsServer(5000)
// Blocks until process is told to exit via unix signal
workers.Run()
}
|
[
"\"REDIS_NAMESPACE\"",
"\"REDIS_SERVER\"",
"\"REDIS_DB\"",
"\"REDIS_POOL\"",
"\"REDIS_PWD\""
] |
[] |
[
"REDIS_NAMESPACE",
"REDIS_SERVER",
"REDIS_DB",
"REDIS_PWD",
"REDIS_POOL"
] |
[]
|
["REDIS_NAMESPACE", "REDIS_SERVER", "REDIS_DB", "REDIS_PWD", "REDIS_POOL"]
|
go
| 5 | 0 | |
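worker.go registers SendEmail but its body lives elsewhere. A sketch of what such a job could look like: go-workers jobs receive a *workers.Msg whose Args() wraps the JSON arguments the enqueuer supplied. The positional recipient/subject layout is an illustrative assumption, not the project's actual payload.

package main

import (
	"fmt"

	"github.com/jrallison/go-workers"
)

// SendEmail is a sample job matching the func(*workers.Msg) signature that
// workers.Process expects; argument layout here is assumed, not upstream.
func SendEmail(message *workers.Msg) {
	to, _ := message.Args().GetIndex(0).String()
	subject, _ := message.Args().GetIndex(1).String()
	fmt.Printf("sending email to %s with subject %q\n", to, subject)
}

func main() {
	// Registered and run exactly as in worker.go:
	//   workers.Process("sendemail", SendEmail, 10)
	//   workers.Run()
	_ = SendEmail
}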
main.go
|
package main
import (
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strings"
"syscall"
)
func init() {
// make sure we only have one process and that it runs on the main thread
// (so that ideally, when we Exec, we keep our user switches and stuff)
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
}
func main() {
log.SetFlags(0) // no timestamps on our logs
// Args that we pass to sudo
var args []string
var shell string
var ext string
// The command that was executed
cmd := os.Args[0]
ext = strings.TrimLeft(filepath.Ext(cmd), ".")
// If no extension, default to bash
if ext == "" {
ext = "bash"
}
// Resolve extension to a shell
shellFound, shellPathErr := exec.LookPath(ext)
if shellPathErr != nil {
log.Fatalf("error: find to find shell %v: %v", ext, shellPathErr)
}
shell = shellFound
// Shell is always launched as current user
username := ""
// user.Current() may not be implemented on some linux distros (e.g. alpine)
user, userErr := user.Current()
if userErr == nil {
username = user.Username
}
// Fallback to fetching the `LOGNAME` env
if username == "" {
username = os.Getenv("LOGNAME")
}
// Fallback to fetching the `USER` env
if username == "" {
username = os.Getenv("USER")
}
// Fallback to fetching `USERNAME` env
if username == "" {
username = os.Getenv("USERNAME")
}
// Fallback to calling `whoami` command
if username == "" {
whoami := exec.Command("whoami")
whoamiStdout, whoamiErr := whoami.Output()
if whoamiErr != nil {
log.Fatalf("error: unable to determine current user: %v", whoamiErr)
}
username = strings.TrimSpace(string(whoamiStdout))
}
// Give up
if username == "" {
log.Fatalf("error: unable to determine current user: %v", userErr)
}
// Set default shell (do not set to `sudosh`; it may cause infinite loops)
os.Setenv("SHELL", shell)
// Fetch environment
env := os.Environ()
// Lookup path for `sudo`
binary, sudoPathErr := exec.LookPath("sudo")
if sudoPathErr != nil {
log.Fatalf("error: find to find sudo: %v", sudoPathErr)
}
// Prepare `sudo` args
if len(os.Args) < 2 {
args = []string{"sudo", "-E", "-u", username, shell, "-l"}
} else {
args = append([]string{"sudo", "-E", "-u", username, shell}, os.Args[1:]...)
}
execErr := syscall.Exec(binary, args, env)
if execErr != nil {
log.Fatalf("error: exec failed: %v", execErr)
}
}
|
[
"\"LOGNAME\"",
"\"USER\"",
"\"USERNAME\""
] |
[] |
[
"USER",
"USERNAME",
"LOGNAME"
] |
[]
|
["USER", "USERNAME", "LOGNAME"]
|
go
| 3 | 0 | |
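The bulk of main() above is a username-resolution fallback chain. A compact standalone sketch that factors the same chain into one helper, using only the standard library; currentUsername is my name for it, not part of the original file.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"os/user"
	"strings"
)

// currentUsername walks the same fallbacks as main(): user.Current(), then
// the LOGNAME/USER/USERNAME environment variables, then `whoami`.
// Returns "" if every source comes up empty.
func currentUsername() string {
	if u, err := user.Current(); err == nil && u.Username != "" {
		return u.Username
	}
	for _, key := range []string{"LOGNAME", "USER", "USERNAME"} {
		if name := os.Getenv(key); name != "" {
			return name
		}
	}
	if out, err := exec.Command("whoami").Output(); err == nil {
		return strings.TrimSpace(string(out))
	}
	return ""
}

func main() {
	fmt.Println(currentUsername())
}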
vendor/github.com/elastic/beats/metricbeat/module/zookeeper/testing.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package zookeeper
import (
"os"
)
// Helper functions for testing used in the zookeeper MetricSets.
// GetZookeeperEnvHost returns the hostname of the ZooKeeper server to use for
// testing. It reads the value from the ZOOKEEPER_HOST environment variable and
// returns localhost if it is not set.
func GetZookeeperEnvHost() string {
host := os.Getenv("ZOOKEEPER_HOST")
if len(host) == 0 {
host = "localhost"
}
return host
}
// GetZookeeperEnvPort returns the port of the ZooKeeper server to use for
// testing. It reads the value from the ZOOKEEPER_PORT environment variable and
// returns 2181 if it is not set.
func GetZookeeperEnvPort() string {
port := os.Getenv("ZOOKEEPER_PORT")
if len(port) == 0 {
port = "2181"
}
return port
}
|
[
"\"ZOOKEEPER_HOST\"",
"\"ZOOKEEPER_PORT\""
] |
[] |
[
"ZOOKEEPER_HOST",
"ZOOKEEPER_PORT"
] |
[]
|
["ZOOKEEPER_HOST", "ZOOKEEPER_PORT"]
|
go
| 2 | 0 | |
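A hypothetical test showing how the two helpers in the record above combine into a dial address; the import path is assumed from the record's filename.

package zookeeper_test

import (
	"net"
	"testing"

	"github.com/elastic/beats/metricbeat/module/zookeeper"
)

// TestHostFromEnv builds the test ZooKeeper address from the helpers above,
// defaulting to localhost:2181 when the environment variables are unset.
func TestHostFromEnv(t *testing.T) {
	addr := net.JoinHostPort(zookeeper.GetZookeeperEnvHost(), zookeeper.GetZookeeperEnvPort())
	t.Logf("connecting to %s", addr)
}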
src/main/java/stringmanipulation/alternating/Solution.java
|
package stringmanipulation.alternating;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Scanner;
public class Solution {
// Complete the alternatingCharacters function below.
static int alternatingCharacters(String s) {
final int[] result = { 0 };
final String[] previous = { "" };
s
.codePoints()
.mapToObj(i -> String.valueOf((char) i))
.forEach(cStr -> {
if (cStr.equals(previous[0])) {
result[0]++;
return;
}
previous[0] = cStr;
});
return result[0];
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String s = scanner.nextLine();
int result = alternatingCharacters(s);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
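For illustration only, the same O(n) counting idea in Go (this document's predominant language): the answer is the number of adjacent equal pairs, since each such pair forces exactly one deletion.

package main

import "fmt"

// alternatingCharacters mirrors the Java solution above: count the minimum
// deletions needed so that no two adjacent characters are equal.
func alternatingCharacters(s string) int {
	deletions := 0
	for i := 1; i < len(s); i++ {
		if s[i] == s[i-1] {
			deletions++
		}
	}
	return deletions
}

func main() {
	fmt.Println(alternatingCharacters("AAABBB")) // prints 4
}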
scripts/updateHosts.go
|
//
// Update hosts for windows
//
package main
import (
"os"
"io"
"bufio"
"net/http"
"time"
"bytes"
"io/ioutil"
)
var (
HOSTS_PATH string = os.Getenv("SYSTEMROOT")+"\\system32\\drivers\\etc\\hosts"
SEARCH_STRING []byte = []byte("#TX-HOSTS")
HOSTS_SOURCE string = "http://tx.txthinking.com/hosts"
)
func main(){
var hosts []byte
f, err := os.OpenFile(HOSTS_PATH, os.O_RDONLY, 0444)
if err == nil {
bnr := bufio.NewReader(f)
for{
line, _, err := bnr.ReadLine()
if bytes.Compare(line,SEARCH_STRING)==0 || err == io.EOF{
break
}
hosts = append(hosts, append(line,[]byte("\r\n")...)...)
}
f.Close()
}
hosts = append(hosts, append(SEARCH_STRING,[]byte("\r\n")...)...)
res, err := http.Get(HOSTS_SOURCE)
if err != nil {
println(err.Error())
time.Sleep(3 * time.Second)
return
}
data, err := ioutil.ReadAll(res.Body)
if err != nil {
println(err.Error())
time.Sleep(3 * time.Second)
return
}
data = bytes.Replace(data, []byte("\n"), []byte("\r\n"), -1)
hosts = append(hosts, data...)
os.Rename(HOSTS_PATH, HOSTS_PATH+"-BAK-TX-HOSTS")
f, err = os.OpenFile(HOSTS_PATH, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
println(err.Error())
time.Sleep(3 * time.Second)
return
}
f.Write(hosts)
f.Close()
println("Success!")
time.Sleep(3 * time.Second)
}
|
[
"\"SYSTEMROOT\""
] |
[] |
[
"SYSTEMROOT"
] |
[]
|
["SYSTEMROOT"]
|
go
| 1 | 0 | |
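A small sketch of resolving the hosts path more defensively than the script above; the C:\Windows fallback is an assumption for illustration, since the original silently builds a relative path when SYSTEMROOT is unset.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hostsPath resolves the Windows hosts file as the script above does, but
// falls back to C:\Windows when SYSTEMROOT is unset (hypothetical fallback).
func hostsPath() string {
	root := os.Getenv("SYSTEMROOT")
	if root == "" {
		root = `C:\Windows`
	}
	return filepath.Join(root, "system32", "drivers", "etc", "hosts")
}

func main() {
	fmt.Println(hostsPath())
}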
vision.py
|
import logging as log
from humblerl import Interpreter
import keras.backend as K
from keras.layers import Conv2D, Conv2DTranspose, Dense, Flatten, Input, Lambda, Reshape
from keras.models import Model
from keras.optimizers import Adam
from skimage.transform import resize
from common_utils import get_model_path_if_exists
class BasicInterpreter(Interpreter):
def __init__(self, state_shape, crop_range, scale=1):
"""Initialize interpreter processors.
Args:
state_shape (tuple): Output shape.
crop_range (string): Range to crop as indices of array.
scale (float): Processed state is scaled by this factor.
"""
self.state_shape = state_shape
self.crop_range = crop_range
self.scale = scale
def __call__(self, state, reward=0.):
"""Resize states to `state_shape` with cropping of `crop_range`.
Args:
state (np.ndarray): Image to crop and resize.
reward (float): Reward.
Return:
np.ndarray: Cropped and reshaped to `state_shape` image.
float: Unchanged reward.
"""
# Crop image to `crop_range`, removes e.g. score bar
img = eval("state" + self.crop_range)
# Resize to 64x64 and cast to 0..1 values
img = resize(img, self.state_shape, mode='constant')
return img * self.scale, reward
def build_vae_model(vae_params, input_shape, model_path=None):
"""Builds VAE encoder, decoder using Keras Model and VAE loss.
Args:
vae_params (dict): VAE parameters from .json config.
input_shape (tuple): Input to encoder shape (state shape).
model_path (str): Path to VAE ckpt. Taken from .json config if `None` (Default: None)
Returns:
keras.models.Model: Compiled VAE, ready for training.
keras.models.Model: Encoder.
keras.models.Model: Decoder.
"""
if K.image_data_format() == 'channels_first':
raise ValueError("Channel first backends aren't supported!")
# Encoder img -> mu, logvar #
encoder_input = Input(shape=input_shape)
h = Conv2D(32, activation='relu', kernel_size=4, strides=2)(encoder_input) # -> 31x31x32
h = Conv2D(64, activation='relu', kernel_size=4, strides=2)(h) # -> 14x14x64
h = Conv2D(128, activation='relu', kernel_size=4, strides=2)(h) # -> 6x6x128
h = Conv2D(256, activation='relu', kernel_size=4, strides=2)(h) # -> 2x2x256
batch_size = K.shape(h)[0] # Needed to sample latent vector
h_shape = K.int_shape(h) # Needed to reconstruct in decoder
h = Flatten()(h)
mu = Dense(vae_params['latent_space_dim'])(h)
logvar = Dense(vae_params['latent_space_dim'])(h)
# Sample latent vector #
def sample(args):
mu, logvar = args
# NOTE: K.exp(logvar / 2) = var^(1/2) = std. deviation
return mu + K.exp(logvar / 2) * K.random_normal(
shape=(batch_size, vae_params['latent_space_dim']))
z = Lambda(sample, output_shape=(vae_params['latent_space_dim'],))([mu, logvar])
encoder = Model(encoder_input, [mu, logvar, z], name='Encoder')
encoder.summary(print_fn=lambda x: log.debug('%s', x))
# Decoder z -> img #
decoder_input = Input(shape=(vae_params['latent_space_dim'],))
h = Reshape(h_shape[1:])(
Dense(h_shape[1] * h_shape[2] * h_shape[3], activation='relu')(decoder_input)
)
h = Conv2DTranspose(128, activation='relu', kernel_size=4, strides=2)(h) # -> 6x6x128
h = Conv2DTranspose(64, activation='relu', kernel_size=4, strides=2)(h) # -> 14x14x64
h = Conv2DTranspose(32, activation='relu', kernel_size=4, strides=2)(h) # -> 30x30x32
out = Conv2DTranspose(3, activation='sigmoid', kernel_size=6, strides=2)(h) # -> 64x64x3
decoder = Model(decoder_input, out, name='Decoder')
decoder.summary(print_fn=lambda x: log.debug('%s', x))
# VAE loss #
def elbo_loss(target, pred):
# NOTE: You use K.reshape to preserve batch dim. K.flatten doesn't work like flatten layer
# and flatten batch dim. too!
# NOTE 2: K.binary_crossentropy does element-wise crossentropy as you need (it calls
# tf.nn.sigmoid_cross_entropy_with_logits in backend), but Keras loss
# binary_crossentropy would average over spatial dim. You sum it as you don't want
# to weight reconstruction loss lower (divide by H * W * C) then KL loss.
reconstruction_loss = K.sum(
K.binary_crossentropy(
K.reshape(target, [batch_size, -1]), K.reshape(pred, [batch_size, -1])
),
axis=1
)
# NOTE: Closed form of KL divergence for Gaussians.
# See Appendix B from VAE paper (Kingma 2014):
# https://arxiv.org/abs/1312.6114
KL_loss = K.sum(
1. + logvar - K.square(mu) - K.exp(logvar),
axis=1
) / 2
return reconstruction_loss - KL_loss
# Build and compile VAE model #
decoder_output = decoder(encoder(encoder_input)[2])
vae = Model(encoder_input, decoder_output, name='VAE')
vae.compile(optimizer=Adam(lr=vae_params['learning_rate']), loss=elbo_loss)
vae.summary(print_fn=lambda x: log.debug('%s', x))
model_path = get_model_path_if_exists(
path=model_path, default_path=vae_params['ckpt_path'], model_name="VAE")
if model_path is not None:
vae.load_weights(model_path)
log.info("Loaded VAE model weights from: %s", model_path)
return vae, encoder, decoder
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
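The closed-form KL term referenced in the NOTE of the record above (Kingma 2014, Appendix B), for a diagonal Gaussian posterior against a standard normal prior:

D_{\mathrm{KL}}\big(\mathcal{N}(\mu,\operatorname{diag}(\sigma^2)) \,\|\, \mathcal{N}(0, I)\big) = -\frac{1}{2}\sum_{j}\left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right)

The code's KL_loss equals the negative of this quantity, so returning reconstruction_loss - KL_loss adds the reconstruction term and the (positive) KL divergence, as the ELBO objective requires.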
testutil/request/request.go
|
package request // import "github.com/docker/docker/testutil/request"
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/testutil/environment"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
)
// NewAPIClient returns a docker API client configured from environment variables
func NewAPIClient(t testing.TB, ops ...client.Opt) client.APIClient {
t.Helper()
ops = append([]client.Opt{client.FromEnv}, ops...)
clt, err := client.NewClientWithOpts(ops...)
assert.NilError(t, err)
return clt
}
// DaemonTime provides the current time on the daemon host
func DaemonTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) time.Time {
t.Helper()
if testEnv.IsLocalDaemon() {
return time.Now()
}
info, err := client.Info(ctx)
assert.NilError(t, err)
dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
assert.NilError(t, err, "invalid time format in GET /info response")
return dt
}
// DaemonUnixTime returns the current time on the daemon host with nanoseconds precision.
// It returns the time formatted the way the client sends timestamps to the server.
func DaemonUnixTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) string {
t.Helper()
dt := DaemonTime(ctx, t, client, testEnv)
return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond()))
}
// Post creates and executes a POST request on the specified host and endpoint, with the specified request modifiers
func Post(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodPost))...)
}
// Delete creates and executes a DELETE request on the specified host and endpoint, with the specified request modifiers
func Delete(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodDelete))...)
}
// Get creates and executes a GET request on the specified host and endpoint, with the specified request modifiers
func Get(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, modifiers...)
}
// Head creates and executes a HEAD request on the specified host and endpoint, with the specified request modifiers
func Head(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodHead))...)
}
// Do creates and executes a request on the specified endpoint, with the specified request modifiers
func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
opts := &Options{
host: DaemonHost(),
}
for _, mod := range modifiers {
mod(opts)
}
req, err := newRequest(endpoint, opts)
if err != nil {
return nil, nil, err
}
client, err := newHTTPClient(opts.host)
if err != nil {
return nil, nil, err
}
resp, err := client.Do(req)
var body io.ReadCloser
if resp != nil {
body = ioutils.NewReadCloserWrapper(resp.Body, func() error {
defer resp.Body.Close()
return nil
})
}
return resp, body, err
}
// ReadBody reads the specified ReadCloser content and returns it
func ReadBody(b io.ReadCloser) ([]byte, error) {
defer b.Close()
return io.ReadAll(b)
}
// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers
func newRequest(endpoint string, opts *Options) (*http.Request, error) {
hostURL, err := client.ParseHostURL(opts.host)
if err != nil {
return nil, errors.Wrapf(err, "failed parsing url %q", opts.host)
}
req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to create request")
}
if os.Getenv("DOCKER_TLS_VERIFY") != "" {
req.URL.Scheme = "https"
} else {
req.URL.Scheme = "http"
}
req.URL.Host = hostURL.Host
for _, config := range opts.requestModifiers {
if err := config(req); err != nil {
return nil, err
}
}
return req, nil
}
// newHTTPClient creates an http client for the specific host
// TODO: Share more code with client.defaultHTTPClient
func newHTTPClient(host string) (*http.Client, error) {
// FIXME(vdemeester) 10*time.Second timeout of SockRequest… ?
hostURL, err := client.ParseHostURL(host)
if err != nil {
return nil, err
}
transport := new(http.Transport)
if hostURL.Scheme == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
transport = &http.Transport{TLSClientConfig: tlsConfig}
}
transport.DisableKeepAlives = true
err = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
return &http.Client{Transport: transport}, err
}
func getTLSConfig() (*tls.Config, error) {
dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
}
option := &tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
return tlsConfig, nil
}
// DaemonHost returns the daemon host string for this test execution
func DaemonHost() string {
daemonURLStr := client.DefaultDockerHost
if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
daemonURLStr = daemonHostVar
}
return daemonURLStr
}
// SockConn opens a connection on the specified socket
func SockConn(timeout time.Duration, daemon string) (net.Conn, error) {
daemonURL, err := url.Parse(daemon)
if err != nil {
return nil, errors.Wrapf(err, "could not parse url %q", daemon)
}
var c net.Conn
switch daemonURL.Scheme {
case "npipe":
return npipeDial(daemonURL.Path, timeout)
case "unix":
return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout)
case "tcp":
if os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
dialer := &net.Dialer{Timeout: timeout}
return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig)
}
return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout)
default:
return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
}
}
|
[
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_HOST\"",
"\"DOCKER_TLS_VERIFY\""
] |
[] |
[
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
] |
[]
|
["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"]
|
go
| 3 | 0 | |
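A hypothetical usage sketch of the helpers in the record above: a GET against the daemon selected via DOCKER_HOST, with the body drained through ReadBody. The /_ping endpoint and the test package name are illustrative.

package daemon_test

import (
	"testing"

	"github.com/docker/docker/testutil/request"
	"gotest.tools/v3/assert"
)

func TestPing(t *testing.T) {
	// Get resolves the host from DOCKER_HOST (or the default docker host).
	resp, body, err := request.Get("/_ping")
	assert.NilError(t, err)

	// ReadBody drains and closes the wrapped response body.
	raw, err := request.ReadBody(body)
	assert.NilError(t, err)
	t.Logf("status=%d body=%q", resp.StatusCode, raw)
}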
client.go
|
package sentry
import (
"context"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"reflect"
"sort"
"time"
)
// maxErrorDepth is the maximum number of errors reported in a chain of errors.
// This protects the SDK from an arbitrarily long chain of wrapped errors.
//
// An additional consideration is that arguably reporting a long chain of errors
// is of little use when debugging production errors with Sentry. The Sentry UI
// is not optimized for long chains either. The top-level error together with a
// stack trace is often the most useful information.
const maxErrorDepth = 10
// usageError is used to report to Sentry an SDK usage error.
//
// It is not exported because it is never returned by any function or method in
// the exported API.
type usageError struct {
error
}
// Logger is an instance of log.Logger that is used to provide debug information about the running Sentry Client.
// It can be enabled either by using Logger.SetOutput directly or with the Debug client option.
var Logger = log.New(ioutil.Discard, "[Sentry] ", log.LstdFlags)
// EventProcessor is a function that processes an event.
// Event processors are used to change an event before it is sent to Sentry.
type EventProcessor func(event *Event, hint *EventHint) *Event
// EventModifier is the interface that wraps the ApplyToEvent method.
//
// ApplyToEvent changes an event based on external data and/or
// an event hint.
type EventModifier interface {
ApplyToEvent(event *Event, hint *EventHint) *Event
}
var globalEventProcessors []EventProcessor
// AddGlobalEventProcessor adds processor to the global list of event
// processors. Global event processors apply to all events.
func AddGlobalEventProcessor(processor EventProcessor) {
globalEventProcessors = append(globalEventProcessors, processor)
}
// Integration allows for registering functions that modify or discard captured events.
type Integration interface {
Name() string
SetupOnce(client *Client)
}
// ClientOptions configures an SDK Client.
type ClientOptions struct {
// The DSN to use. If the DSN is not set, the client is effectively
// disabled.
Dsn string
// In debug mode, the debug information is printed to stdout to help you
// understand what sentry is doing.
Debug bool
// Configures whether SDK should generate and attach stacktraces to pure
// capture message calls.
AttachStacktrace bool
// The sample rate for event submission (0.0 - 1.0, defaults to 1.0).
SampleRate float64
// List of regexp strings that will be used to match against event's message
// and if applicable, caught errors type and value.
// If the match is found, then a whole event will be dropped.
IgnoreErrors []string
// Before send callback.
BeforeSend func(event *Event, hint *EventHint) *Event
// Before breadcrumb add callback.
BeforeBreadcrumb func(breadcrumb *Breadcrumb, hint *BreadcrumbHint) *Breadcrumb
// Integrations to be installed on the current Client, receives default
// integrations.
Integrations func([]Integration) []Integration
// io.Writer implementation that should be used with the Debug mode.
DebugWriter io.Writer
// The transport to use. Defaults to HTTPTransport.
Transport Transport
// The server name to be reported.
ServerName string
// The release to be sent with events.
Release string
// The dist to be sent with events.
Dist string
// The environment to be sent with events.
Environment string
// Maximum number of breadcrumbs.
MaxBreadcrumbs int
// An optional pointer to http.Client that will be used with a default
// HTTPTransport. Using your own client will make HTTPTransport, HTTPProxy,
// HTTPSProxy and CaCerts options ignored.
HTTPClient *http.Client
// An optional pointer to http.Transport that will be used with a default
// HTTPTransport. Using your own transport will make HTTPProxy, HTTPSProxy
// and CaCerts options ignored.
HTTPTransport http.RoundTripper
// An optional HTTP proxy to use.
// This will default to the HTTP_PROXY environment variable.
HTTPProxy string
// An optional HTTPS proxy to use.
// This will default to the HTTPS_PROXY environment variable.
// HTTPS_PROXY takes precedence over HTTP_PROXY for https requests.
HTTPSProxy string
// An optional set of SSL certificates to use.
CaCerts *x509.CertPool
}
// Client is the underlying processor that is used by the main API and Hub
// instances.
type Client struct {
options ClientOptions
dsn *Dsn
eventProcessors []EventProcessor
integrations []Integration
Transport Transport
}
// NewClient creates and returns an instance of Client configured using ClientOptions.
func NewClient(options ClientOptions) (*Client, error) {
if options.Debug {
debugWriter := options.DebugWriter
if debugWriter == nil {
debugWriter = os.Stdout
}
Logger.SetOutput(debugWriter)
}
if options.Dsn == "" {
options.Dsn = os.Getenv("SENTRY_DSN")
}
if options.Release == "" {
options.Release = os.Getenv("SENTRY_RELEASE")
}
if options.Environment == "" {
options.Environment = os.Getenv("SENTRY_ENVIRONMENT")
}
var dsn *Dsn
if options.Dsn != "" {
var err error
dsn, err = NewDsn(options.Dsn)
if err != nil {
return nil, err
}
}
client := Client{
options: options,
dsn: dsn,
}
client.setupTransport()
client.setupIntegrations()
return &client, nil
}
func (client *Client) setupTransport() {
transport := client.options.Transport
if transport == nil {
if client.options.Dsn == "" {
transport = new(noopTransport)
} else {
transport = NewHTTPTransport()
}
}
transport.Configure(client.options)
client.Transport = transport
}
func (client *Client) setupIntegrations() {
integrations := []Integration{
new(contextifyFramesIntegration),
new(environmentIntegration),
new(modulesIntegration),
new(ignoreErrorsIntegration),
}
if client.options.Integrations != nil {
integrations = client.options.Integrations(integrations)
}
for _, integration := range integrations {
if client.integrationAlreadyInstalled(integration.Name()) {
Logger.Printf("Integration %s is already installed\n", integration.Name())
continue
}
client.integrations = append(client.integrations, integration)
integration.SetupOnce(client)
Logger.Printf("Integration installed: %s\n", integration.Name())
}
}
// AddEventProcessor adds an event processor to the client.
func (client *Client) AddEventProcessor(processor EventProcessor) {
client.eventProcessors = append(client.eventProcessors, processor)
}
// Options returns ClientOptions for the current Client.
func (client Client) Options() ClientOptions {
return client.options
}
// CaptureMessage captures an arbitrary message.
func (client *Client) CaptureMessage(message string, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromMessage(message, LevelInfo)
return client.CaptureEvent(event, hint, scope)
}
// CaptureException captures an error.
func (client *Client) CaptureException(exception error, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromException(exception, LevelError)
return client.CaptureEvent(event, hint, scope)
}
// CaptureEvent captures an event on the currently active client if any.
//
// The event must already be assembled. Typically code would instead use
// the utility methods like CaptureException. The return value is the
// event ID. In case Sentry is disabled or event was dropped, the return value will be nil.
func (client *Client) CaptureEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
return client.processEvent(event, hint, scope)
}
// Recover captures a panic.
// Returns an EventID if successful, or nil if there is no error to recover from.
func (client *Client) Recover(err interface{}, hint *EventHint, scope EventModifier) *EventID {
if err == nil {
err = recover()
}
if err != nil {
if err, ok := err.(error); ok {
event := client.eventFromException(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
if err, ok := err.(string); ok {
event := client.eventFromMessage(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
}
return nil
}
// RecoverWithContext captures a panic and passes relevant context object.
// Returns an EventID if successful, or nil if there is no error to recover from.
func (client *Client) RecoverWithContext(
ctx context.Context,
err interface{},
hint *EventHint,
scope EventModifier,
) *EventID {
if err == nil {
err = recover()
}
if err != nil {
if hint.Context == nil && ctx != nil {
hint.Context = ctx
}
if err, ok := err.(error); ok {
event := client.eventFromException(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
if err, ok := err.(string); ok {
event := client.eventFromMessage(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
}
return nil
}
// Flush waits until the underlying Transport sends any buffered events to the
// Sentry server, blocking for at most the given timeout. It returns false if
// the timeout was reached. In that case, some events may not have been sent.
//
// Flush should be called before terminating the program to avoid
// unintentionally dropping events.
//
// Do not call Flush indiscriminately after every call to CaptureEvent,
// CaptureException or CaptureMessage. Instead, to have the SDK send events over
// the network synchronously, configure it to use the HTTPSyncTransport in the
// call to Init.
func (client *Client) Flush(timeout time.Duration) bool {
return client.Transport.Flush(timeout)
}
func (client *Client) eventFromMessage(message string, level Level) *Event {
event := NewEvent()
event.Level = level
event.Message = message
if client.Options().AttachStacktrace {
event.Threads = []Thread{{
Stacktrace: NewStacktrace(),
Crashed: false,
Current: true,
}}
}
return event
}
func (client *Client) eventFromException(exception error, level Level) *Event {
err := exception
if err == nil {
err = usageError{fmt.Errorf("%s called with nil error", callerFunctionName())}
}
event := NewEvent()
event.Level = level
for i := 0; i < maxErrorDepth && err != nil; i++ {
event.Exception = append(event.Exception, Exception{
Value: err.Error(),
Type: reflect.TypeOf(err).String(),
Stacktrace: ExtractStacktrace(err),
})
switch previous := err.(type) {
case interface{ Unwrap() error }:
err = previous.Unwrap()
case interface{ Cause() error }:
err = previous.Cause()
default:
err = nil
}
}
// Add a trace of the current stack to the most recent error in a chain if
// it doesn't have a stack trace yet.
// We only add to the most recent error to avoid duplication and because the
// current stack is most likely unrelated to errors deeper in the chain.
if event.Exception[0].Stacktrace == nil {
event.Exception[0].Stacktrace = NewStacktrace()
}
// event.Exception should be sorted such that the most recent error is last.
reverse(event.Exception)
return event
}
// reverse reverses the slice a in place.
func reverse(a []Exception) {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
}
}
func (client *Client) processEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
options := client.Options()
// TODO: Reconsider whether it's worth diverging from the default
// implementation of other SDKs. In Go the zero value for float64 is 0.0,
// which means that if someone uses the ClientOptions{} struct directly
// and we did not check for 0 here, we'd skip all events by default.
if options.SampleRate != 0.0 {
randomFloat := rand.New(rand.NewSource(time.Now().UnixNano())).Float64()
if randomFloat > options.SampleRate {
Logger.Println("Event dropped due to SampleRate hit.")
return nil
}
}
if event = client.prepareEvent(event, hint, scope); event == nil {
return nil
}
// As per spec, transactions do not go through BeforeSend.
if event.Type != transactionType && options.BeforeSend != nil {
h := &EventHint{}
if hint != nil {
h = hint
}
if event = options.BeforeSend(event, h); event == nil {
Logger.Println("Event dropped due to BeforeSend callback.")
return nil
}
}
client.Transport.SendEvent(event)
return &event.EventID
}
func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventModifier) *Event {
if event.EventID == "" {
event.EventID = EventID(uuid())
}
if event.Timestamp.IsZero() {
event.Timestamp = time.Now().UTC()
}
if event.Level == "" {
event.Level = LevelInfo
}
if event.ServerName == "" {
if client.Options().ServerName != "" {
event.ServerName = client.Options().ServerName
} else if hostname, err := os.Hostname(); err == nil {
event.ServerName = hostname
}
}
if event.Release == "" && client.Options().Release != "" {
event.Release = client.Options().Release
}
if event.Dist == "" && client.Options().Dist != "" {
event.Dist = client.Options().Dist
}
if event.Environment == "" && client.Options().Environment != "" {
event.Environment = client.Options().Environment
}
event.Platform = "go"
event.Sdk = SdkInfo{
Name: "sentry.go",
Version: Version,
Integrations: client.listIntegrations(),
Packages: []SdkPackage{{
Name: "sentry-go",
Version: Version,
}},
}
if scope != nil {
event = scope.ApplyToEvent(event, hint)
if event == nil {
return nil
}
}
for _, processor := range client.eventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
return nil
}
}
for _, processor := range globalEventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
return nil
}
}
return event
}
func (client Client) listIntegrations() []string {
integrations := make([]string, 0, len(client.integrations))
for _, integration := range client.integrations {
integrations = append(integrations, integration.Name())
}
sort.Strings(integrations)
return integrations
}
func (client Client) integrationAlreadyInstalled(name string) bool {
for _, integration := range client.integrations {
if integration.Name() == name {
return true
}
}
return false
}
|
[
"\"SENTRY_DSN\"",
"\"SENTRY_RELEASE\"",
"\"SENTRY_ENVIRONMENT\""
] |
[] |
[
"SENTRY_DSN",
"SENTRY_RELEASE",
"SENTRY_ENVIRONMENT"
] |
[]
|
["SENTRY_DSN", "SENTRY_RELEASE", "SENTRY_ENVIRONMENT"]
|
go
| 3 | 0 | |
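A minimal usage sketch of the client in the record above, assuming the conventional import path github.com/getsentry/sentry-go (the record only shows package sentry). With Dsn left empty, NewClient falls back to SENTRY_DSN; if that is also empty, the client is effectively disabled.

package main

import (
	"time"

	"github.com/getsentry/sentry-go"
)

func main() {
	// Release/Environment would otherwise fall back to
	// SENTRY_RELEASE / SENTRY_ENVIRONMENT, as in NewClient above.
	client, err := sentry.NewClient(sentry.ClientOptions{
		Release:     "my-app@1.0.0",
		Environment: "staging",
	})
	if err != nil {
		panic(err)
	}
	client.CaptureMessage("hello from the sketch", nil, nil)
	client.Flush(2 * time.Second) // drain buffered events before exit
}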
pkg/store/kubeconfig_store_vault.go
|
// Copyright 2021 Daniel Foehr
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"sync"
vaultapi "github.com/hashicorp/vault/api"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"github.com/danielfoehrkn/kubeswitch/types"
)
func NewVaultStore(vaultAPIAddressFromFlag, vaultTokenFileName, kubeconfigName string, kubeconfigStore types.KubeconfigStore) (*VaultStore, error) {
vaultStoreConfig := &types.StoreConfigVault{}
if kubeconfigStore.Config != nil {
buf, err := yaml.Marshal(kubeconfigStore.Config)
if err != nil {
log.Fatal(err)
}
err = yaml.Unmarshal(buf, vaultStoreConfig)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal vault config: %w", err)
}
}
vaultAPI := vaultStoreConfig.VaultAPIAddress
if len(vaultAPIAddressFromFlag) > 0 {
vaultAPI = vaultAPIAddressFromFlag
}
vaultAddress := os.Getenv("VAULT_ADDR")
if len(vaultAddress) > 0 {
vaultAPI = vaultAddress
}
if len(vaultAPI) == 0 {
return nil, fmt.Errorf("when using the vault kubeconfig store, the API address of the vault has to be provided either by command line argument \"vaultAPI\", via environment variable \"VAULT_ADDR\" or via SwitchConfig file")
}
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
var vaultToken string
// https://www.vaultproject.io/docs/commands/token-helper
tokenBytes, _ := ioutil.ReadFile(fmt.Sprintf("%s/%s", home, vaultTokenFileName))
if tokenBytes != nil {
vaultToken = string(tokenBytes)
}
vaultTokenEnv := os.Getenv("VAULT_TOKEN")
if len(vaultTokenEnv) > 0 {
vaultToken = vaultTokenEnv
}
if len(vaultToken) == 0 {
return nil, fmt.Errorf("when using the vault kubeconfig store, a vault API token must be provided. Per default, the token file in \"~.vault-token\" is used. The default token can be overriden via the environment variable \"VAULT_TOKEN\"")
}
vaultConfig := &vaultapi.Config{
Address: vaultAPI,
}
client, err := vaultapi.NewClient(vaultConfig)
if err != nil {
return nil, err
}
client.SetToken(vaultToken)
return &VaultStore{
Logger: logrus.New().WithField("store", types.StoreKindVault),
KubeconfigName: kubeconfigName,
KubeconfigStore: kubeconfigStore,
Client: client,
}, nil
}
func (s *VaultStore) GetID() string {
id := "default"
if s.KubeconfigStore.ID != nil {
id = *s.KubeconfigStore.ID
}
return fmt.Sprintf("%s.%s", types.StoreKindVault, id)
}
func (s *VaultStore) GetKind() types.StoreKind {
return types.StoreKindVault
}
func (s *VaultStore) GetStoreConfig() types.KubeconfigStore {
return s.KubeconfigStore
}
func (s *VaultStore) GetLogger() *logrus.Entry {
return s.Logger
}
func (s *VaultStore) recursivePathTraversal(wg *sync.WaitGroup, searchPath string, channel chan SearchResult) {
defer wg.Done()
secret, err := s.Client.Logical().List(searchPath)
if err != nil {
channel <- SearchResult{
KubeconfigPath: "",
Error: err,
}
return
}
if secret == nil {
s.Logger.Infof("No secrets found for path %s", searchPath)
return
}
items := secret.Data["keys"].([]interface{})
for _, item := range items {
itemPath := fmt.Sprintf("%s/%s", strings.TrimSuffix(searchPath, "/"), item)
if strings.HasSuffix(item.(string), "/") {
// this is another folder
wg.Add(1)
go s.recursivePathTraversal(wg, itemPath, channel)
} else if item != "" {
// found an actual secret
channel <- SearchResult{
KubeconfigPath: itemPath,
Error: err,
}
}
}
}
func (s *VaultStore) StartSearch(channel chan SearchResult) {
wg := sync.WaitGroup{}
// start multiple recursive searches from different root paths
for _, path := range s.vaultPaths {
s.Logger.Debugf("discovering secrets from vault under path %q", path)
wg.Add(1)
go s.recursivePathTraversal(&wg, path, channel)
}
wg.Wait()
}
func getBytesFromSecretValue(v interface{}) ([]byte, error) {
data, ok := v.(string)
if !ok {
return nil, fmt.Errorf("failed to marshal value into string")
}
bytes := []byte(data)
// check if it is base64 encode - if yes use the decoded version
base64, err := base64.StdEncoding.DecodeString(data)
if err == nil {
bytes = base64
}
return bytes, nil
}
func (s *VaultStore) GetKubeconfigForPath(path string) ([]byte, error) {
s.Logger.Debugf("vault: getting secret for path %q", path)
secret, err := s.Client.Logical().Read(path)
if err != nil {
return nil, fmt.Errorf("could not read secret with path '%s': %v", path, err)
}
if secret == nil {
return nil, fmt.Errorf("no kubeconfig found for path %s", path)
}
if len(secret.Data) != 1 {
return nil, fmt.Errorf("cannot read kubeconfig from %q. Only support one entry in the secret", path)
}
for secretKey, data := range secret.Data {
matched, err := filepath.Match(s.KubeconfigName, secretKey)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("cannot read kubeconfig from %q. Key %q does not match desired kubeconfig name", path, s.KubeconfigName)
}
bytes, err := getBytesFromSecretValue(data)
if err != nil {
return nil, fmt.Errorf("cannot read kubeconfig from %q: %v", path, err)
}
return bytes, nil
}
return nil, fmt.Errorf("should not happen")
}
func (s *VaultStore) VerifyKubeconfigPaths() error {
var duplicatePath = make(map[string]*struct{})
for _, path := range s.KubeconfigStore.Paths {
// do not add duplicate paths
if duplicatePath[path] != nil {
continue
}
duplicatePath[path] = &struct{}{}
_, err := s.Client.Logical().Read(path)
if err != nil {
return err
}
s.vaultPaths = append(s.vaultPaths, path)
}
return nil
}
|
[
"\"VAULT_ADDR\"",
"\"VAULT_TOKEN\""
] |
[] |
[
"VAULT_ADDR",
"VAULT_TOKEN"
] |
[]
|
["VAULT_ADDR", "VAULT_TOKEN"]
|
go
| 2 | 0 | |
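A standalone sketch of the address-resolution precedence implemented in NewVaultStore above: config-file value, overridden by the command-line flag, overridden by VAULT_ADDR. The function name and example values are illustrative.

package main

import (
	"fmt"
	"os"
)

// resolveVaultAddr mirrors the precedence above: value from the config file,
// overridden by the flag, overridden by the VAULT_ADDR environment variable.
func resolveVaultAddr(fromConfig, fromFlag string) (string, error) {
	addr := fromConfig
	if fromFlag != "" {
		addr = fromFlag
	}
	if env := os.Getenv("VAULT_ADDR"); env != "" {
		addr = env
	}
	if addr == "" {
		return "", fmt.Errorf("vault API address must be set via flag, VAULT_ADDR, or config")
	}
	return addr, nil
}

func main() {
	addr, err := resolveVaultAddr("https://vault.example.com", "")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(addr)
}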
Providers/Scripts/3.x/Scripts/nxService.py
|
#!/usr/bin/env python
# ===================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ===================================
from contextlib import contextmanager
import subprocess
import os
import sys
import glob
import codecs
import imp
import time
import copy
import re
import fnmatch
from functools import reduce
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
helperlib = imp.load_source('helperlib', '../helperlib.py')
LG = nxDSCLog.DSCLog
# [ClassVersion("1.0.0"),FriendlyName("nxService"), SupportsInventory()]
# class MSFT_nxServiceResource : OMI_BaseResource
# {
# [key, InventoryFilter] string Name;
# [write,required,ValueMap{"init", "upstart", "systemd"},Values{"init","upstart","systemd"}, InventoryFilter] string Controller;
# [write, InventoryFilter] boolean Enabled;
# [write,ValueMap{"Running", "Stopped"},Values{"Running", "Stopped"}, InventoryFilter] string State;
# [read] string Path;
# [read] string Description;
# [read] string Runlevels;
# };
global show_mof
show_mof = False
def init_vars(Name, Controller, Enabled, State):
if Name is None:
Name = ''
if Name == '*':
Name = ''
if Controller is None or Controller == '*':
Controller = GetController()
if Enabled is None:
Enabled = False
Enabled = (Enabled == True)
if State is None:
State = ''
return Name, Controller.lower(), Enabled, State.lower()
def Set_Marshall(Name, Controller, Enabled, State):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(Name, Controller, Enabled, State) = init_vars(
Name, Controller, Enabled, State)
if Controller == '':
return [-1]
retval = Set(Name, Controller, Enabled, State)
return retval
def Test_Marshall(Name, Controller, Enabled, State):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(Name, Controller, Enabled, State) = init_vars(
Name, Controller, Enabled, State)
if Controller == '':
return [-1]
retval = Test(Name, Controller, Enabled, State)
return retval
def Get_Marshall(Name, Controller, Enabled, State):
arg_names = list(locals().keys())
arg_names.append('Path')
arg_names.append('Runlevels')
arg_names.append('Description')
(Name, Controller, Enabled, State) = init_vars(
Name, Controller, Enabled, State)
if Controller == '':
return [-1], {}
retval = 0
(retval, Name, Controller, Enabled, State, Path, Description, Runlevels) = Get(
Name, Controller, Enabled, State)
Name = protocol.MI_String(Name)
Controller = protocol.MI_String(Controller)
Enabled = protocol.MI_Boolean(Enabled)
State = protocol.MI_String(State)
Path = protocol.MI_String(Path)
Description = protocol.MI_String(Description)
Runlevels = protocol.MI_String(Runlevels)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Inventory_Marshall(Name, Controller, Enabled, State):
FilterEnabled = (Enabled is not None)
(Name, Controller, Enabled, State) = init_vars(
Name, Controller, Enabled, State)
if Controller == '':
return -1, {"__Inventory": {}}
sc = ServiceContext(Name, Controller, Enabled, State)
sc.FilterEnabled = FilterEnabled
if not GetAll(sc):
return -1, {"__Inventory": {}}
for srv in sc.services_list:
srv['Name'] = protocol.MI_String(srv['Name'])
srv['Controller'] = protocol.MI_String(srv['Controller'])
srv['Enabled'] = protocol.MI_Boolean(srv['Enabled'])
srv['State'] = protocol.MI_String(srv['State'])
srv['Path'] = protocol.MI_String(srv['Path'])
srv['Description'] = protocol.MI_String(srv['Description'])
srv['Runlevels'] = protocol.MI_String(srv['Runlevels'])
Inventory = protocol.MI_InstanceA(sc.services_list)
retd = {}
retd["__Inventory"] = Inventory
return 0, retd
#
# Begin user defined DSC functions
#
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, Name, Controller, Enabled, State):
if not show_mof:
return
mof = ''
mof += op + ' nxService MyService'
mof += '{\n'
mof += ' Name = "' + Name + '"\n'
mof += ' Controller = "' + Controller + '"\n'
mof += ' Enabled = ' + str(Enabled) + '\n'
mof += ' State = "' + State + '"\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
Print(mof, file=f)
LG().Log('INFO', mof)
f.close()
def Print(s, file=sys.stdout):
file.write(s + '\n')
@contextmanager
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = codecs.open(filename, encoding='utf-8', mode=mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
def RunGetOutput(cmd, no_output, chk_err=True):
"""
Wrapper for subprocess.check_output.
Execute 'cmd'. Returns return code and STDOUT,
trapping expected exceptions.
Reports exceptions to Error if chk_err parameter is True
"""
def check_output(no_output, *popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
if no_output:
out_file = None
else:
out_file = subprocess.PIPE
process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" \
% (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
output = b''
try:
output = subprocess.check_output(
no_output, cmd, stderr=subprocess.STDOUT, shell=True)
if output is None:
output = b''
except subprocess.CalledProcessError as e:
if chk_err:
Print('CalledProcessError. Error Code is ' +
str(e.returncode), file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Error Code is '
+ str(e.returncode))
Print(
'CalledProcessError. Command string was '
+ e.cmd, file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Command string was ' + e.cmd)
Print('CalledProcessError. Command result was ' +
(e.output[:-1]).decode('ascii', 'ignore'), file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Command result was '
+ (e.output[:-1]).decode('ascii', 'ignore'))
if no_output:
return e.returncode, None
else:
return e.returncode, e.output.decode('ascii', 'ignore')
if no_output:
return 0, None
else:
return 0, output.decode('ascii', 'ignore')
def RunGetOutputNoStderr(cmd, no_output, chk_err=True):
"""
Wrapper for subprocess.check_output without stderr.
Execute 'cmd'. Returns return code and STDOUT,
trapping expected exceptions.
Reports exceptions to Error if chk_err parameter is True
"""
def check_output(no_output, *popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
if no_output:
out_file = None
else:
out_file = subprocess.PIPE
process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" \
% (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
output = b''
try:
output = subprocess.check_output(
no_output, cmd, stderr=subprocess.DEVNULL, shell=True)
if output is None:
output = b''
except subprocess.CalledProcessError as e:
if chk_err:
Print('CalledProcessError. Error Code is ' +
str(e.returncode), file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Error Code is '
+ str(e.returncode))
Print(
'CalledProcessError. Command string was '
+ e.cmd, file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Command string was ' + e.cmd)
Print('CalledProcessError. Command result was ' +
(e.output[:-1]).decode('ascii', 'ignore'), file=sys.stderr)
LG().Log(
'ERROR', 'CalledProcessError. Command result was '
+ (e.output[:-1]).decode('ascii', 'ignore'))
if no_output:
return e.returncode, None
else:
return e.returncode, e.output.decode('ascii', 'ignore')
if no_output:
return 0, None
else:
return 0, output.decode('ascii', 'ignore')
systemctl_path = "/usr/bin/systemctl"
upstart_start_path = "/sbin/start"
upstart_stop_path = "/sbin/stop"
upstart_status_path = "/sbin/status"
if os.path.exists('/sbin'):
os.environ['PATH'] = os.environ['PATH'] + ':/sbin'
code, out = RunGetOutput('which service', False, False)
initd_service = out.strip('\n')
initd_chkconfig = "/sbin/chkconfig"
initd_invokerc = "/usr/sbin/invoke-rc.d"
initd_updaterc = "/usr/sbin/update-rc.d"
lsb_install_initd = "/usr/lib/lsb/install_initd"
lsb_remove_initd = "/usr/lib/lsb/remove_initd"
runlevel_path = "/sbin/runlevel"
def ReadFile(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
with opened_w_error(path, 'rb') as (F, error):
if error:
Print("Exception opening file " + path + " Error Code: " +
str(error.errno) +
" Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path +
" Error Code: " +
str(error.errno) + " Error: " + error.message +
error.strerror)
else:
d = F.read()
return d, error
def WriteFile(path, contents):
"""
Safely attempt to write data to a file,
replacing the existing file or creating it and
ensuring file is always closed at exit.
Return the exception object.
The error is None if the data was written.
Log results to stderr.
"""
error = None
with opened_w_error(path, 'wb+') as (F, error):
if error:
Print("Exception opening file " + path + " Error Code: " +
str(error.errno) +
" Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path +
" Error Code: " +
str(error.errno) + " Error: " + error.message +
error.strerror)
else:
F.write(contents)
return error
def Process(params, no_output=False):
line = ''
spc = ''
for p in params:
line += (spc + p)
if len(spc) == 0:
spc = ' '
code, out = RunGetOutput(line, no_output, False)
return (out, out, code)
def StartService(sc):
if sc.Controller == "systemd":
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "start", sc.Name])
if retval != 0:
Print("Error: " + systemctl_path + " start " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + systemctl_path +
" start " + sc.Name + " failed: " + process_stderr)
return [-1]
elif sc.Controller == "upstart":
(process_stdout, process_stderr, retval) = Process(
[upstart_start_path, sc.Name])
if retval != 0:
Print("Error: " + upstart_start_path +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + upstart_start_path
+ " failed: " + process_stderr)
return [-1]
elif sc.Controller == "init":
check_state_program = initd_service
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
check_state_program = initd_invokerc
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "start"])
if retval != 0:
Print("Error: " + check_state_program +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + check_state_program
+ " failed: " + process_stderr)
return [-1]
if not IsServiceRunning(sc):
Print("Error: " + sc.Name + " start failed: " +
process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + sc.Name +
" start failed: " + process_stderr)
return [-1]
return [0]
def StopService(sc):
if sc.Controller == "systemd":
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "stop", sc.Name])
if retval != 0:
Print("Error: " + systemctl_path + " failed: " +
process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + systemctl_path
+ " failed: " + process_stderr)
return [-1]
elif sc.Controller == "upstart":
(process_stdout, process_stderr, retval) = Process(
[upstart_stop_path, sc.Name])
if retval != 0:
Print("Error: " + upstart_stop_path +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + upstart_stop_path
+ " failed: " + process_stderr)
return [-1]
elif sc.Controller == "init":
check_state_program = initd_service
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
check_state_program = initd_invokerc
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "stop"])
if retval != 0:
Print("Error: " + check_state_program +
" failed: " + process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + check_state_program
+ " failed: " + process_stderr)
return [-1]
if IsServiceRunning(sc):
Print("Error: " + sc.Name + " stop failed: " +
process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + sc.Name +
" stop failed: " + process_stderr)
return [-1]
return [0]
def GetRunLevel():
(process_stdout, process_stderr, retval) = Process([runlevel_path])
if retval != 0:
Print("Error: " + runlevel_path + " failed: " +
process_stderr, file=sys.stderr)
LG().Log(
'ERROR', "Error: " + runlevel_path + " failed: " + process_stderr)
return -1
tokens = process_stdout.split(" ")
if len(tokens) != 2:
Print("Error: unexpected number of tokens from " +
runlevel_path + ". stdout: " + process_stdout, file=sys.stderr)
LG().Log('ERROR', "Error: unexpected number of tokens from " +
runlevel_path + ". stdout: " + process_stdout)
return -1
return int(tokens[1])
def DetermineInitState(stdout):
if "is running" in stdout or "start/running" in stdout \
or "..running" in stdout:
return True
elif stdout.strip() == "running":
return True
elif "(running)" in stdout:
return True
else:
return False
def DetermineInitEnabled(stdout, runlevel):
tokens = stdout.split()
tokens = tokens[1:]
if runlevel > (len(tokens) - 1):
Print("runlevel " + str(runlevel) +
" not found in chkconfig", file=sys.stderr)
LG().Log(
'ERROR', "runlevel " + str(runlevel) + " not found in chkconfig")
return False
runlevel_tokens = tokens[runlevel].split(":")
if len(runlevel_tokens) != 2:
Print(
"Unable to determine format for chkconfig run level",
file=sys.stderr)
LG().Log(
'ERROR', "Unable to determine format for chkconfig run level")
return False
if runlevel_tokens[1] == "on":
return True
else:
return False
def GetSystemdState(sc):
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "status", sc.Name])
if retval == 0:
if '(running)' in process_stdout:
return "running"
return "stopped"
def TestSystemdState(sc):
if sc.State and sc.State != GetSystemdState(sc):
return False
return True
def GetSystemdEnabled(sc):
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "is-enabled", sc.Name])
if retval == 0:
return True
else:
return False
def TestSystemdEnabled(sc):
if sc.Enabled != GetSystemdEnabled(sc):
return False
return True
def TestSystemd(sc):
if not SystemdExists():
return [-1]
if not TestSystemdState(sc):
return [-1]
if not TestSystemdEnabled(sc):
return [-1]
return [0]
def GetUpstartState(sc):
(process_stdout, process_stderr, retval) = Process(
[upstart_status_path, sc.Name])
if retval != 0:
Print("Error: " + upstart_status_path +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + upstart_status_path +
" failed: " + process_stderr)
return ""
if (sc.Name + " start") in process_stdout:
return "running"
else:
return "stopped"
def TestUpstartState(sc):
if sc.State and sc.State != GetUpstartState(sc):
return False
return True
def GetUpstartEnabled(sc):
if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
start_on_exists = False
start_on_is_enabled = False
stop_on_exists = False
stop_on_is_enabled = False
file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
if error is not None:
Print(
"Error reading:/etc/init/" + sc.Name + ".conf",
file=sys.stderr)
LG().Log('ERROR', "Error reading:/etc/init/" +
sc.Name + ".conf")
return "Error"
for full_line in file_lines.splitlines():
# everything after a '#' character is a comment, so strip it off
line = full_line.split("#")[0]
if "start on" in line:
start_on_exists = True
if ("(" in line) or ("and" in line) or ("or" in line):
return "Complex"
elif "start on runlevel [" in line:
runlevel = GetRunLevel()
specified_runlevel_digits = line.split("[")[1][:-1]
if str(runlevel) in specified_runlevel_digits:
start_on_is_enabled = True
else:
start_on_is_enabled = False
if "!" in specified_runlevel_digits:
start_on_is_enabled = not start_on_is_enabled
else:
return "Complex"
if "stop on" in line:
stop_on_exists = True
if ("(" in line) or ("and" in line) or ("or" in line):
return "Complex"
elif "stop on runlevel [" in line:
runlevel = GetRunLevel()
specified_runlevel_digits = line.split("[")[1][:-1]
if str(runlevel) in specified_runlevel_digits:
stop_on_is_enabled = True
else:
stop_on_is_enabled = False
if "!" in specified_runlevel_digits:
stop_on_is_enabled = not stop_on_is_enabled
else:
return "Complex"
if not start_on_exists and not stop_on_exists: # not upstart
if os.path.islink('/etc/init.d/' + sc.Name) and \
os.readlink('/etc/init.d/' + sc.Name) \
== '/lib/init/upstart-job':
# this is a 'converted' init script, check the default rc2.d
# for smylink to conf file. if so its enabled.
file_list = os.listdir('/etc/rc2.d')
for f in file_list:
f = '/etc/rc2.d/' + f
if os.path.islink(f) and os.readlink(f) == \
"../init.d/" + sc.Name:
return True
return False
(process_stdout, process_stderr, retval) = Process(
['chkconfig', sc.Name, '']) # try init style
if retval == 0:
if 'off' not in process_stdout:
return True
return False
if start_on_exists and start_on_is_enabled:
if stop_on_exists and stop_on_is_enabled:
Print("Error: Having trouble determining whether service " +
sc.Name + " is enabled or disabled.", file=sys.stderr)
LG().Log('ERROR',
"Error: Having trouble determining whether service " +
sc.Name + " is enabled or disabled.")
return "Complex"
else:
return True
else:
return False
Print("Error: Unable to find line containing 'start on' in " +
sc.Name + ".conf", file=sys.stderr)
LG().Log('ERROR',
"Error: Unable to find line containing 'start on' in " +
sc.Name + ".conf")
return False
else:
Print("Error: conf file does not exist for service named " +
sc.Name, file=sys.stderr)
LG().Log('ERROR',
"Error: conf file does not exist for service named " +
sc.Name)
return False
def TestUpstartEnabled(sc):
currently_enabled = GetUpstartEnabled(sc)
if currently_enabled == "Complex":
Print("Error: Cannot modify 'Enabled' state for service " + sc.Name +
", conf file too complex. Please use the File provider to " +
"write your own conf file for this service.", file=sys.stderr)
LG().Log('ERROR', "Error: Cannot modify 'Enabled' state for service "
+ sc.Name +
", conf file too complex. Please use the File provider to " +
" writeyour own conf file for this service.")
return False
return currently_enabled
def TestUpstart(sc):
if not UpstartExists():
return [-1]
if not TestUpstartState(sc):
return [-1]
if sc.Enabled != TestUpstartEnabled(sc):
return [-1]
return [0]
def GetInitState(sc):
check_state_program = initd_service
# debian style init. These are missing in redhat.
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
check_state_program = initd_service
if os.path.isfile(initd_service):
check_state_program = initd_service
else: # invoke the service directly
check_state_program = '/etc/init.d/'
if check_state_program == '/etc/init.d/':
(process_stdout, process_stderr, retval) = Process(
[check_state_program + sc.Name, "status"], True)
if retval != 0:
Print("Error: " + check_state_program +
sc.Name + " status failed: ", file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
sc.Name + " status failed: ")
if IsServiceRunning(sc):
return "running"
else:
return "stopped"
else:
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "status"])
if retval != 0:
if IsServiceRunning(sc):
return "running"
else:
return "stopped"
if DetermineInitState(process_stdout):
return "running"
else:
return "stopped"
def TestInitState(sc):
if sc.State and sc.State != GetInitState(sc):
return False
return True
def GetInitEnabled(sc):
runlevel = GetRunLevel()
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
# A service is enabled if a symbolic link
# exists in /etc/rc${RUNLEVEL}.d/ with the name:
# S??${sc.Name}
matched_files = glob.glob(
"/etc/rc" + str(runlevel) + ".d/S??" + sc.Name)
for f in matched_files:
if os.path.islink(f):
return True
return False
else:
check_enabled_program = initd_chkconfig
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "--list", sc.Name])
        if retval != 0:
Print("Error: " + check_enabled_program +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" failed: " + process_stderr)
return False
if DetermineInitEnabled(process_stdout, runlevel):
return True
else:
return False
def TestInitEnabled(sc):
    if sc.Enabled != GetInitEnabled(sc):
return False
return True
def TestInit(sc):
if not InitExists():
return [-1]
if not TestInitState(sc):
return [-1]
if not TestInitEnabled(sc):
return [-1]
return [0]
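# Note on the return convention used by the Test*/Set/Get entry points in
# this provider (descriptive comment only): a list whose first element is 0
# means success / "matches the desired state", while -1 signals an error or
# a mismatch back to the caller.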
def SystemdExists():
global systemctl_path
code, out = RunGetOutput('which systemctl', False, False)
    if code == 0:
systemctl_path = out.strip()
return True
else:
return False
def UpstartExists():
if (os.path.isfile('/sbin/upstart-local-bridge')
or os.path.isfile('/sbin/upstart-udev-bridge')) \
and os.path.isfile(upstart_start_path) \
and os.path.isfile(upstart_stop_path) \
and os.path.isfile(upstart_status_path):
return True
else:
return False
def InitExists():
if os.path.isfile(initd_service) and os.path.isfile(initd_chkconfig):
return True
elif os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
return True
else:
return False
def ServiceExistsInSystemd(sc):
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "status", sc.Name])
    if retval != 0:
if "Loaded: loaded" in process_stdout:
return True
else:
return False
else:
return True
def ServiceExistsInUpstart(sc):
(process_stdout, process_stderr, retval) = Process(
[upstart_status_path, sc.Name])
    if retval != 0:
return False
else:
return True
def ServiceExistsInInit(sc):
check_state_program = initd_service
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
check_state_program = initd_invokerc
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "status"])
if "unrecognized service" in process_stderr \
or "no such service" in process_stderr \
or "not found" in process_stderr:
Print(process_stderr, file=sys.stderr)
LG().Log('INFO', process_stderr)
return False
else:
return True
def CreateSystemdService(sc):
Print("Error: systemd services cannot be created from the service " +
"provider. Please use the file provider to create a systemd " +
"conf file, then modify the service using this service provider.",
file=sys.stderr)
    LG().Log('ERROR',
             "Error: systemd services cannot be created from the service "
             "provider. Please use the file provider to create a systemd "
             "conf file, then modify the service using this service provider.")
return [-1]
def ModifySystemdService(sc):
if sc.Enabled is True:
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "enable", sc.Name + '.service'])
        if retval != 0:
Print("Error: " + systemctl_path + " enable " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + systemctl_path +
" enable " + sc.Name + " failed: " + process_stderr)
return [-1]
elif sc.Enabled is False:
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "disable", sc.Name + '.service'])
        if retval != 0:
Print("Error: " + systemctl_path + " disable " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + systemctl_path +
" disable " + sc.Name + " failed: " + process_stderr)
return [-1]
(process_stdout, process_stderr, retval) = Process(
[systemctl_path, "status", sc.Name + '.service'])
    # retval may be non-zero for 'status' even if the service exists.
if 'No such file or directory' in process_stdout:
Print("Error: " + systemctl_path + " status " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + systemctl_path +
" status " + sc.Name + " failed: " + process_stderr)
return [-1]
if 'Active: active' in process_stdout:
Print("Running", file=sys.stderr)
LG().Log('INFO', "Running")
if sc.State and sc.State != "running":
return StopService(sc)
else:
Print("Stopped", file=sys.stderr)
LG().Log('INFO', "Stopped")
if sc.State and sc.State != "stopped":
return StartService(sc)
return [0]
def CreateUpstartService(sc):
Print("Error: Upstart services cannot be created from the service " +
"provider. Please use the file provider to create an upstart " +
"conf file, then modify the service using this service provider.",
file=sys.stderr)
LG().Log('ERROR',
"Error: Upstart services cannot be created from the service " +
"provider. Please use the file provider to create an upstart " +
"conf file, then modify the service using this service provider.")
return [-1]
def ModifyUpstartConfFile(sc):
if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        if len(file_lines) == 0 or error is not None:
Print("Error: Conf file unable to be read for service " +
sc.Name, file=sys.stderr)
LG().Log(
'ERROR', "Error: Conf file unable to be read for service " +
sc.Name)
return False
outfile = ""
start_on_exists = False
stop_on_exists = False
for full_line in file_lines.splitlines():
line = full_line.split("#")[0]
if "start on" in line or "stop on" in line and not start_on_exists:
# If we got to this point, we can assume that we're allowed to
# modify the conf file. No need to check for a "Complex" conf
# file.
start_on_exists = True
if sc.Enabled is True:
outfile += "start on runlevel [2345]\n"
outfile += "stop on runlevel [!2345]\n"
elif sc.Enabled is False:
outfile += "stop on runlevel [0123456]\n"
elif "start on" in line or "stop on" in line and start_on_exists:
continue # its xtra now
else:
outfile += full_line + "\n"
if start_on_exists or stop_on_exists:
if WriteFile("/etc/init/" + sc.Name + ".conf", outfile) \
is not None:
Print(
"Error: Unable to write conf file for service " + sc.Name,
file=sys.stderr)
LG().Log(
'ERROR', "Error: Unable to write conf file for service " +
sc.Name)
return False
return True
else: # not an upstart service
if os.path.islink('/etc/init.d/' + sc.Name) \
and os.readlink('/etc/init.d/' + sc.Name) \
== '/lib/init/upstart-job':
            # This is a 'converted' init script; check the default rc[2345].d
            # for a symlink to the conf file. If so, it's enabled.
for rc in range(2, 6):
file_list = os.listdir('/etc/rc' + str(rc) + '.d')
found = False
for f in file_list:
f = '/etc/rc' + str(rc) + '.d/' + f
if os.path.islink(f) and os.readlink(f) \
== "../init.d/" + sc.Name:
found = True
break
if sc.Enabled is True:
if not found:
# create the symlink
os.symlink(
"../init.d/" + sc.Name, "/etc/rc2.d/S22" + sc.Name)
return True
else:
if found:
os.unlink(f)
return True
if sc.Enabled is True:
(process_stdout, process_stderr, retval) = Process(
                ['update-rc.d', sc.Name, 'defaults'])
            if retval != 0:
Print("Error: " + process_stdout + " enable " +
sc.Name + " failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + process_stdout +
" enable " + sc.Name + " failed: " + process_stderr)
return False
else:
(process_stdout, process_stderr, retval) = Process(
                ['update-rc.d', '-f', sc.Name, 'remove'])
            if retval != 0:
Print("Error: " + process_stdout + " disable " +
sc.Name + " failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + process_stdout +
" disable " + sc.Name + " failed: " + process_stderr)
return False
return True
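# Illustrative effect of ModifyUpstartConfFile (assumed conf layout; comments
# only). Given /etc/init/foo.conf containing "start on runlevel [2345]":
#   Enabled=True  rewrites it to "start on runlevel [2345]" plus
#                 "stop on runlevel [!2345]"
#   Enabled=False rewrites it to "stop on runlevel [0123456]"
# Any further 'start on'/'stop on' lines are dropped and all other lines are
# copied through verbatim.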
def ModifyUpstartService(sc):
    if sc.Enabled != TestUpstartEnabled(sc):
if not ModifyUpstartConfFile(sc):
Print("Error: Failed to modify upstart conf file", file=sys.stderr)
LG().Log('ERROR', "Error: Failed to modify upstart conf file")
return [-1]
if sc.State == "running":
(process_stdout, process_stderr, retval) = Process(
[upstart_start_path, sc.Name])
        if retval != 0:
if "Job is already running" not in process_stderr:
Print("Error: " + upstart_start_path + " " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + upstart_start_path +
" " + sc.Name + " failed: " + process_stderr)
return [-1]
if not IsServiceRunning(sc):
Print("Error: " + upstart_start_path + " " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + upstart_start_path +
" " + sc.Name + " failed: " + process_stderr)
return [-1]
elif sc.State == "stopped":
(process_stdout, process_stderr, retval) = Process(
[upstart_stop_path, sc.Name])
        if retval != 0:
if "Unknown instance" not in process_stderr:
Print("Error: " + upstart_stop_path + " " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + upstart_stop_path +
" " + sc.Name + " failed: " + process_stderr)
return [-1]
if IsServiceRunning(sc):
Print("Error: " + upstart_stop_path + " " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + upstart_stop_path +
" " + sc.Name + " failed: " + process_stderr)
return [-1]
return [0]
def CreateInitService(sc):
(process_stdout, process_stderr, retval) = Process(
[lsb_install_initd, sc.Name])
    if retval != 0:
Print("Error: " + lsb_install_initd + " " + sc.Name +
" failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + lsb_install_initd +
" " + sc.Name + " failed: " + process_stderr)
return [-1]
return ModifyInitService(sc)
def ModifyInitService(sc):
check_state_program = initd_service
check_enabled_program = initd_chkconfig
    # Debian-style init. These are missing on Red Hat.
if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
if os.path.isfile(initd_service):
check_state_program = initd_service
else: # invoke the service directly
check_state_program = '/etc/init.d/'
check_enabled_program = initd_updaterc
if sc.Enabled is True:
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "enable"])
            if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " enable failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " enable failed: " + process_stderr)
# try 'defaults'
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " defaults failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " defaults failed: " + process_stderr)
return [-1]
if 'already exist' in process_stdout: # we need to remove them first
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "remove"])
                    if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " remove failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " remove failed: " + process_stderr)
return [-1]
# it should work now
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " defaults failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " defaults failed: " + process_stderr)
return [-1]
elif sc.Enabled is False:
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "disable"])
            if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " disable failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " disable failed: " + process_stderr)
# try remove
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " remove failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " remove failed: " + process_stderr)
return [-1]
else:
if sc.Enabled is True:
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, sc.Name, "on"])
            if retval != 0:
Print("Error: " + check_enabled_program + " " + sc.Name +
" on failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" " + sc.Name + " on failed: " + process_stderr)
# try 'defaults'
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " defaults failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " defaults failed: " + process_stderr)
return [-1]
if 'already exist' in process_stdout: # we need to remove them first
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "remove"])
                    if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " remove failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " remove failed: " + process_stderr)
return [-1]
# it should work now
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "defaults"])
                    if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " defaults failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " defaults failed: " + process_stderr)
return [-1]
elif sc.Enabled is False:
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, sc.Name, "off"])
            if retval != 0:
Print("Error: " + check_enabled_program + " " + sc.Name +
" off failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" " + sc.Name + " off failed: " + process_stderr)
# try remove
(process_stdout, process_stderr, retval) = Process(
[check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
Print("Error: " + check_enabled_program + " -f " +
sc.Name + " remove failed: " + process_stderr,
file=sys.stderr)
LG().Log('ERROR', "Error: " + check_enabled_program +
" -f " + sc.Name + " remove failed: " + process_stderr)
return [-1]
if sc.State == "running":
        # don't try to read stdout or stderr, as the 'service start' command
        # redirects them, causing a hang in subprocess.communicate()
if check_state_program == '/etc/init.d/':
(process_stdout, process_stderr, retval) = Process(
[check_state_program + sc.Name, "start"], True)
            if retval != 0:
Print("Error: " + check_state_program +
sc.Name + " start failed: ", file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
sc.Name + " start failed: ")
return [-1]
else:
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "start"], True)
            if retval != 0:
Print("Error: " + check_state_program + " " +
sc.Name + " start failed: ", file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
" " + sc.Name + " start failed: ")
return [-1]
if not IsServiceRunning(sc):
Print("Error: " + check_state_program + " " +
sc.Name + " start failed: ", file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
" " + sc.Name + " start failed: ")
return [-1]
elif sc.State == "stopped":
if check_state_program == '/etc/init.d/':
(process_stdout, process_stderr, retval) = Process(
[check_state_program + sc.Name, "stop"], True)
            if retval != 0:
Print("Error: " + check_state_program +
sc.Name + " stop failed: ", file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
sc.Name + " stop failed: ")
return [-1]
else:
(process_stdout, process_stderr, retval) = Process(
[check_state_program, sc.Name, "stop"])
            if retval != 0:
Print("Error: " + check_state_program + " " + sc.Name +
" stop failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
" " + sc.Name + " stop failed: " + process_stderr)
return [-1]
if IsServiceRunning(sc):
Print("Error: " + check_state_program + " " + sc.Name +
" stop failed: " + process_stderr, file=sys.stderr)
LG().Log('ERROR', "Error: " + check_state_program +
" " + sc.Name + " stop failed: " + process_stderr)
return [-1]
return [0]
def IsServiceRunning(sc):
time.sleep(1)
    cmd = 'ps -ef | grep -v grep | grep -E ".*( ' + \
        sc.Name + '|/' + sc.Name + ')(\\..*?|.?)( |$)"'
code, out = RunGetOutput(cmd, False, False)
    if code != 0:
return False
return True
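# Example of the check above (illustrative): for sc.Name == 'cron' the
# pipeline becomes
#   ps -ef | grep -v grep | grep -E ".*( cron|/cron)(\..*?|.?)( |$)"
# which matches e.g. '/usr/sbin/cron' in the ps listing; a grep exit code
# of 0 (at least one match) is reported as "running".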
def Set(Name, Controller, Enabled, State):
ShowMof('SET', Name, Controller, Enabled, State)
sc = ServiceContext(Name, Controller, Enabled, State)
if sc.Controller == "systemd":
        if SystemdExists():
if ServiceExistsInSystemd(sc):
return ModifySystemdService(sc)
else:
return CreateSystemdService(sc)
elif sc.Controller == "upstart":
        if UpstartExists():
if ServiceExistsInUpstart(sc):
return ModifyUpstartService(sc)
else:
return CreateUpstartService(sc)
elif sc.Controller == "init":
        if InitExists():
if ServiceExistsInInit(sc):
return ModifyInitService(sc)
else:
return CreateInitService(sc)
return [-1]
def Test(Name, Controller, Enabled, State):
ShowMof('TEST', Name, Controller, Enabled, State)
sc = ServiceContext(Name, Controller, Enabled, State)
if sc.Controller == "systemd":
return TestSystemd(sc)
elif sc.Controller == "upstart":
return TestUpstart(sc)
elif sc.Controller == "init":
return TestInit(sc)
else:
Print("Invalid service controller (" + sc.Controller +
") specified for service: " + sc.Name, file=sys.stderr)
LG().Log('ERROR', "Invalid service controller (" +
sc.Controller + ") specified for service: " + sc.Name)
return [-1]
return [-1]
def Get(Name, Controller, Enabled, State):
ShowMof('GET', Name, Controller, Enabled, State)
sc = ServiceContext(Name, Controller, Enabled, State)
Path = ""
exit_code = 0
if not sc.Controller:
Print("Error: Controller not specified.", file=sys.stderr)
LG().Log('ERROR', "Error: Controller not specified.")
exit_code = -1
elif sc.Controller == "systemd":
if not ServiceExistsInSystemd(sc):
Print("Error: Unable to find service named " +
sc.Name + " in systemd.", file=sys.stderr)
LG().Log(
'ERROR', "Error: Unable to find service named " +
sc.Name + " in systemd.")
exit_code = -1
else:
Enabled = GetSystemdEnabled(sc)
State = GetSystemdState(sc)
Path = "/usr/lib/systemd/system/" + sc.Name + ".service"
elif sc.Controller == "upstart":
if not ServiceExistsInUpstart(sc):
Print("Error: Unable to find service named " +
sc.Name + " in upstart.", file=sys.stderr)
LG().Log(
'ERROR', "Error: Unable to find service named " +
sc.Name + " in upstart.")
exit_code = -1
else:
temp = GetUpstartEnabled(sc)
if temp is False:
Enabled = False
else:
# When GetUpstartEnabled returns "Complex", we assume that it
# is enabled (and we won't modify it).
Enabled = True
State = GetUpstartState(sc)
Path = "/etc/init/" + sc.Name + ".conf"
elif sc.Controller == "init":
if not ServiceExistsInInit(sc):
Print("Error: Unable to find service named " +
sc.Name + " in init.", file=sys.stderr)
LG().Log(
'ERROR', "Error: Unable to find service named " +
sc.Name + " in init.")
exit_code = -1
else:
Enabled = GetInitEnabled(sc)
State = GetInitState(sc)
Path = "/etc/init.d/" + sc.Name
GetOne(sc)
return [exit_code, Name, Controller, Enabled, State, Path, sc.Description, sc.Runlevels]
def GetOne(sc):
GetAll(sc)
if len(sc.services_list):
sc.Description = sc.services_list[0]['Description']
sc.Runlevels = sc.services_list[0]['Runlevels']
def GetAll(sc):
if sc.Controller == 'init':
return InitdGetAll(sc)
if sc.Controller == 'systemd':
return SystemdGetAll(sc)
if sc.Controller == 'upstart':
return UpstartGetAll(sc)
def GetRunlevels(sc, Name):
    if sc.runlevels_d is None:
sc.runlevels_d = {}
cmd = "file /etc/rc*.d/* | grep link | awk '{print $5,$1}' | sort"
code, out = RunGetOutput(cmd, False, False)
for line in out.splitlines():
line = line.replace("'", '')
srv = line.split(' ')[0]
rl = line.split(' ')[1]
n = os.path.basename(srv)
if n not in sc.runlevels_d.keys():
sc.runlevels_d[n] = {}
if 'Path' not in sc.runlevels_d[n].keys():
sc.runlevels_d[n]['Path'] = srv.replace('..', '/etc')
if 'Runlevels' not in sc.runlevels_d[n].keys():
sc.runlevels_d[n]['Runlevels'] = ''
s = 'off'
if rl[11].lower() == 's':
s = 'on'
sc.runlevels_d[n]['Runlevels'] += rl[7] + ':' + s + ' '
if Name in sc.runlevels_d.keys():
return sc.runlevels_d[Name]
return None
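# Illustrative parse for the pipeline above (comments only): 'file' prints
# lines like "/etc/rc2.d/S20ssh: symbolic link to `../init.d/ssh'", so
# srv = '../init.d/ssh' and rl = '/etc/rc2.d/S20ssh'; rl[7] is the runlevel
# digit ('2') and rl[11] the S/K start-kill flag, yielding an entry such as
#   {'ssh': {'Path': '/etc/init.d/ssh', 'Runlevels': '2:on '}}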
def SystemdGetAll(sc):
d = {}
if os.system('which systemctl') != 0:
Print("Error: 'Controller' = " + sc.Controller +
" is incorrectly specified.", file=sys.stderr)
LG().Log('ERROR', "Error: 'Controller' = " +
sc.Controller + " is incorrectly specified.")
return False
Name = sc.Name
if '*' not in Name and '?' not in Name and len(Name) > 0:
Name = Name.replace('.service', '')
Name += '.service'
# Do the commands work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the commands.
# RunGetOutput(chk_err = True) will log the error message here if it
# occurs.
cmd = 'systemctl -a list-unit-files ' + Name
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
sname = ''
# Get the last service name from the output.
m = re.search(r'.*?\n(.*?)[.]service.*?\n', txt, re.M)
if m is not None:
sname = m.group(1)
cmd = 'systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show ' + sname
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = 'systemctl -a list-unit-files ' + Name + '| grep \.service | grep -v "@" | awk \'{print $1}\' | xargs systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show'
code, txt = RunGetOutputNoStderr(cmd, False, False)
    txt = txt.replace('\n\n', '@@')
    txt = txt.replace('\n', '|')
    services = txt.split('@@')
    subs = re.compile(r'(.*?=)')
    for srv in services:
        if len(srv) == 0:
            continue
        s = srv.split('|')
        d['Name'] = subs.sub('', s[0].replace('.service', ''))
        d['Controller'] = sc.Controller
        d['Description'] = subs.sub('', s[2])
        d['State'] = subs.sub('', s[3])
        if len(sc.State) and sc.State != d['State'].lower():
            continue
        d['Path'] = subs.sub('', s[4])
        d['Enabled'] = 'enabled' in subs.sub('', s[5])
        if sc.FilterEnabled and sc.Enabled != d['Enabled']:
            continue
        rld = GetRunlevels(sc, d['Name'])
        if rld is not None and 'Runlevels' in rld.keys():
            d['Runlevels'] = rld['Runlevels']
        else:
            d['Runlevels'] = subs.sub('', s[1])
        sc.services_list.append(copy.deepcopy(d))
return True
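# Illustrative shape of the parsed 'systemctl show' output (comments only):
# each unit's properties arrive newline-separated with a blank line between
# units, so after the replace() calls above one record looks like
#   'Names=sshd.service|WantedBy=multi-user.target|Description=...|...'
# and subs strips each leading 'Key=' to leave the bare values s[0]..s[5].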
def UpstartGetAll(sc):
    d = {}
    names = {}
if os.system('which initctl') != 0:
Print("Error: 'Controller' = " + sc.Controller + " is incorrectly specified.", file=sys.stderr)
LG().Log('ERROR', "Error: 'Controller' = " + sc.Controller + " is incorrectly specified.")
return False
# Do the commands work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the commands.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = 'initctl list'
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
cmd = initd_service + ' --status-all'
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = "initctl list | sed 's/[(].*[)] //g' | tr ', ' ' ' | awk '{print $1,$2}'"
code, txt = RunGetOutputNoStderr(cmd, False, False)
services = txt.splitlines()
cmd = initd_service + " --status-all &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile"
code, txt = RunGetOutputNoStderr(cmd, False, False)
    txt = txt.replace('[', '')
    txt = txt.replace(']', '')
services.extend(txt.splitlines())
for srv in services:
if len(srv) == 0:
continue
        s = srv.split()
        if len(s[0]) == 1:  # swap them
            s.reverse()
        d['Name'] = s[0]
        if len(sc.Name) and not fnmatch.fnmatch(d['Name'], sc.Name):
continue
if d['Name'] in names.keys():
continue
names[d['Name']] = None
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
if 'running' in s[1] or '+' in s[1]:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[0]):
d['Path'] = '/etc/init.d/' + s[0]
elif os.path.exists('/etc/init/' + s[0] + '.conf'):
d['Path'] = '/etc/init/' + s[0] + '.conf'
# 'initctl list' won't show disabled services
d['Enabled'] = True
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
if len(s[1]) > 1:
cmd = 'initctl show-config ' + d['Name'] + ' | grep -E "start |stop " | tr "\n" " " | tr -s " " '
code, out = RunGetOutputNoStderr(cmd, False, False)
d['Runlevels'] = out[1:]
else:
            rld = GetRunlevels(sc, d['Name'])
            if rld is not None and 'Runlevels' in rld.keys():
d['Runlevels'] = rld['Runlevels']
sc.services_list.append(copy.deepcopy(d))
return True
def InitdGetAll(sc):
    d = {}
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
initd_service_status = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStat.sh'
status_postfix = ''
initd_service_status_all = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStatAll.sh'
else:
initd_service_status = initd_service
status_postfix = ' status'
initd_service_status_all = initd_service + ' --status-all '
if os.path.exists(initd_chkconfig):
# SLES 11-SP4 chkconfig can return error code on success,
# so don't check chkconfig error code if this is the case.
if os.path.exists('/etc/SuSE-release'):
            txt = open('/etc/SuSE-release', 'r').read()
            s = r'.*?VERSION.*?=(.*?)\n.*?PATCHLEVEL.*?=(.*?)\n'
            m = re.search(s, txt, re.M)
            if m is not None:
                if not (int(m.group(1)) == 11 and int(m.group(2)) == 4):
# Does the command work?
# There may be no error detected in our multi-pipe command below.
# To keep from returning garbage, we must test the command.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = initd_chkconfig + ' --list '
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = initd_chkconfig + ' --list | grep on | grep -v based'
code, txt = RunGetOutputNoStderr(cmd, False, False)
        services = txt.splitlines()
for srv in services:
if len(srv) == 0:
continue
            s = srv.split()
            d['Name'] = s[0]
            if len(sc.Name) and not fnmatch.fnmatch(d['Name'], sc.Name):
continue
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
cmd = initd_service_status + ' ' + s[0] + status_postfix
code, txt = RunGetOutputNoStderr(cmd, False, False)
if 'running' in txt:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[0]):
d['Path'] = '/etc/init.d/' + s[0]
d['Enabled'] = ':on' in srv
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
d['Runlevels'] = reduce(lambda x, y: x + ' ' + y, s[1:])
sc.services_list.append(copy.deepcopy(d))
else:
# Does the command work?
# There may be no error detected in our multi-statement command below.
# To keep from returning garbage, we must test the command.
# RunGetOutput(chk_err = True) will log the error message here if it occurs.
cmd = initd_service_status_all
code, txt = RunGetOutputNoStderr(cmd, False, True)
if code != 0:
return False
# Now we know it will work.
cmd = initd_service_status_all + ' &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile'
code, txt = RunGetOutputNoStderr(cmd, False, False)
txt = txt.replace('[','')
txt = txt.replace(']','')
services = txt.splitlines()
for srv in services:
if len(srv) == 0:
continue
            s = srv.split()
            d['Name'] = s[1]
            if len(sc.Name) and not fnmatch.fnmatch(d['Name'], sc.Name):
continue
d['Controller'] = sc.Controller
d['Description'] = ''
d['State'] = 'stopped'
if '+' in s[0]:
d['State'] = 'running'
if len(sc.State) and sc.State != d['State'].lower():
continue
d['Path'] = ''
if os.path.exists('/etc/init.d/' + s[1]):
d['Path'] = '/etc/init.d/' + s[1]
elif os.path.exists('/etc/init/' + s[1] + '.conf'):
d['Path'] = '/etc/init/' + s[1] + '.conf'
            d['Runlevels'] = ''  # avoid carrying over the previous service's value
            rld = GetRunlevels(sc, d['Name'])
            if rld is not None and 'Runlevels' in rld.keys():
                d['Runlevels'] = rld['Runlevels']
            d['Enabled'] = 'on' in d['Runlevels']
if sc.FilterEnabled and sc.Enabled != d['Enabled']:
continue
sc.services_list.append(copy.deepcopy(d))
return True
def GetController():
if UpstartExists():
return 'upstart'
if SystemdExists():
return 'systemd'
if InitExists():
return 'init'
Print('ERROR: Unable to determine Controller.')
LG().Log('ERROR', 'Unable to determine Controller.')
return ''
class ServiceContext:
def __init__(self, Name, Controller, Enabled, State):
self.services_list=[]
self.runlevels_d=None
self.Name = Name
self.Controller = Controller
self.Enabled = Enabled
self.State = State
self.Path = ''
self.Description = ''
self.Runlevels = ''
self.FilterEnabled = False
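# Minimal illustrative use of the entry points above (assumed MOF-driven
# callers; not executed as part of this module):
#   Test('cron', 'init', True, 'running')  # [0] if in desired state, else [-1]
#   Set('cron', 'init', True, 'running')   # enables and starts the service
#   Get('cron', 'init', True, 'running')   # [exit_code, Name, Controller, ...]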
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
third_party/chromite/appengine/cq_stats/manage.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""(Semi-)Autogenerated django module for app management."""
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cq_stats.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
platform/core/polyaxon/tracker/events/search.py
|
import tracker
from events.registry import search
tracker.subscribe(search.SearchCreatedEvent)
tracker.subscribe(search.SearchDeletedEvent)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
handlers/bloog/timings.py
|
# The MIT License
#
# Copyright (c) 2008 William T. Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
timings.py
Created by William Katz on 2008-05-04.
Copyright (c) 2008 Publishare LLC. Distributed under MIT License.
"""
__author__ = "William T. Katz"
# Global that stores timing runs, all keyed to incoming url path.
# Note that since this is a global, you'll only get stats from the
# currently visited server and it could be reset. The timing
# utility is not meant to be comprehensive but only a hack that
# doesn't interfere with memcached stats.
TIMINGS = {}
import time
import urlparse
import os
from handlers import restful
from utils import authorized
import view
def start_run():
url = os.environ['PATH_INFO']
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
global TIMINGS
    if path not in TIMINGS:
TIMINGS[path] = {
"runs": 0,
"duration": 0.0,
"min_time": None,
"max_time": None,
"mutex_lock": False
}
timing = TIMINGS[path]
if not timing["mutex_lock"]:
timing["mutex_lock"] = True
timing["start_time"] = time.time()
return path
return None
def stop_run(path):
global TIMINGS
if path and path in TIMINGS:
timing = TIMINGS[path]
elapsed_time = time.time() - timing["start_time"]
timing["duration"] += elapsed_time
timing["runs"] += 1
if (not timing["min_time"]) or timing["min_time"] > elapsed_time:
timing["min_time"] = elapsed_time
if (not timing["max_time"]) or timing["max_time"] < elapsed_time:
timing["max_time"] = elapsed_time
timing["mutex_lock"] = False
class TimingHandler(restful.Controller):
@authorized.role("admin")
def get(self):
global TIMINGS
stats = []
total_time = 0.0
avg_speed = 0.0
total_calls = 0
total_full_renders = 0
for key in TIMINGS:
full_renders = 0
if key in view.NUM_FULL_RENDERS:
full_renders = view.NUM_FULL_RENDERS[key]
total_full_renders += full_renders
url_timing = TIMINGS[key]
if url_timing["runs"] > 0:
url_stats = url_timing.copy()
url_stats.update({'url': key,
'avg_speed': url_timing["duration"] /
url_timing["runs"],
'full_renders': full_renders})
stats.append(url_stats)
total_time += url_timing["duration"]
total_calls += url_timing["runs"]
if total_calls > 0:
avg_speed = total_time / total_calls
view.ViewPage(cache_time=0).render(self, {"stats": stats,
"avg_speed": avg_speed,
"total_time": total_time,
"total_calls": total_calls,
"total_full_renders":
total_full_renders})
@authorized.role("admin")
def delete(self):
global TIMINGS
TIMINGS = {}
|
[] |
[] |
[
"PATH_INFO"
] |
[]
|
["PATH_INFO"]
|
python
| 1 | 0 | |
tests/portfolio_projects/forms_test.py
|
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio.settings')
django.setup()
import base64
import tempfile
from django.test import TestCase, override_settings
from portfolio.portfolio_projects.forms import CommentForm, ProjectForm
from django.core.files.uploadedfile import InMemoryUploadedFile
from io import BytesIO
class TestForms(TestCase):
def test_comment_form_valid_data(self):
form = CommentForm({
'text': 'Text',
})
self.assertTrue(form.is_valid())
def test_comment_form_has_no_data(self):
form = CommentForm({
'text': '',
})
self.assertFalse(form.is_valid())
def test_project_form_has_no_data(self):
form = ProjectForm({})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 4)
@override_settings(MEDIA_ROOT=tempfile.gettempdir())
def test_project_form_valid_data(self):
        image_data = base64.b64decode(TEST_IMAGE)
        image = InMemoryUploadedFile(
            BytesIO(image_data),
            field_name='tempfile',
            name='tempfile.png',
            content_type='image/png',
            size=len(image_data),  # size of the decoded bytes, not the base64 text
            charset='utf-8',
        )
form = ProjectForm({
'title': 'Title1',
'description': 'Description1',
'link': 'https://www.google.com/',
}, {
'image': image,
})
self.assertTrue(form.is_valid())
TEST_IMAGE = '''
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAABIAAAASABGyWs+AAAACXZwQWcAAAAQAAAAEABcxq3DAAABfElEQVQ4y52TvUuCURTGf5Zg
9goR9AVlUZJ9KURuUkhIUEPQUIubRFtIJTk0NTkUFfgntAUt0eBSQwRKRFSYBYFl1GAt901eUYuw
QTLM1yLPds/zPD/uPYereYjHcwD+tQ3+Uys+LwCah3g851la/lf4qwKb61Sn3z5WFUWpCHB+GUGb
SCRIpVKqBkmSAMrqsViMqnIiwLx7HO/U+6+30GYyaVXBP1uHrfUAWvWMWiF4+qoOUJLJkubYcDs2
S03hvODSE7564ek5W+Kt+tloa9ax6v4OZ++jZO+jbM+pD7oE4HM1lX1vYNGoDhCyQMiCGacRm0Vf
EM+uiudjke6YcRoLfiELNB2dXTkAa08LPlcT2fpJAMxWZ1H4NnKITuwD4Nl6RMgCAE1DY3PuyyQZ
JLrNvZhMJgCmJwYB2A1eAHASDiFkQUr5Xn0RoJLSDg7ZCB0fVRQ29/TmP1Nf/0BFgL2dQH4LN9dR
7CMOaiXDn6FayYB9xMHeTgCz1cknd+WC3VgTorUAAAAldEVYdGNyZWF0ZS1kYXRlADIwMTAtMTIt
MjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5
OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAAEAgGAAAAH/P/
YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFnAAAAEAAAABAA
XMatwwAAAhdJREFUOMuVk81LVFEYxn/3zocfqVebUbCyTLyYRYwD0cemCIRyUVToLloERUFBbYpo
E7WIFv0TLaP6C2Y17oYWWQxRMwo5OUplkR/XOefMuW8LNYyZLB94eOE5L79zzns4johIPp/n+YtX
fPn6jaq1bKaI65LY3sHohXOk02mcNxMT8vjJU5TWbEUN8Ti3bl4n0tLW/qBcniW0ltBaxFrsWl3P
7IZ8PdNa82m6RPTDxyLGmLq7JDuaqVQCllbqn6I4OUU0CJYJw7BmMR6LcPvyURbLGR49q/71KlGj
dV3AlbEhBnog3mo5e8Tycrz+cKPamBrAiUOdnD/ZhlFziKpw7RS8LVry01IDcI3WbHRXu8OdS524
pgx6BlkJEKW4PxrSFP2z12iNq1UFrTVaaxDNw6vttDXMg/2O2AXC5UUkWKI7vsDdM+Z3X9Ws2tXG
YLTCaMWNMY8DfREAFpcUkzPC1JzL8kKAGM3xvoDD+1uJVX+ilEIptTpECUP8PXEGB/rIzw/iNPXj
de1jML0Xay3l6QKfZyewP95x8dhr7r0HpSoAODt7dktoQ0SEpsZGent78f1+fN/H9/sxxlAoFCkU
CxQKRUqlEkppXNddBXTv2CXrtH/JofYVoqnUQbLZ8f/+A85aFWAolYJcLiee50ksFtuSm7e1SCaT
EUREcrmcnB4ZkWQyKZ7nbepEIiHDw8OSzWZFROQX6PpZFxAtS8IAAAAldEVYdGNyZWF0ZS1kYXRl
ADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2RpZnktZGF0ZQAyMDEwLTEy
LTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggolQTkcNChoKAAAADUlIRFIAAAAQAAAA
EAgGAAAAH/P/YQAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAASAAAAEgARslrPgAAAAl2cEFn
AAAAEAAAABAAXMatwwAAAo9JREFUOMuNks1rVGcUxn/ve+9kUuOdfIzamNHEMK3RVILQQAuCWURo
rSAtbsV20T/EP6O7FtxkkYWQKK7F4Kb1C6yoSVrNdDIm1YTMjDP3vfc9p4ubZEYopQceDhwOD89z
zmO89/rw0SNu3b5D5a8q3gv7ZXa7dkY2sIwMf8w3X3/F9PTnhL/+9oCff7nBeq2GMYb/U5sbm1TX
a8TOEQwMHbq+vLKKqqIiiAh+r3tBvKBds72der1OtVolfP78BWmadmnNVKgqI0cOkiRtNrc9Zt9H
x9fK6iphs/keVflAoqpSHOzjh+8maL59yk83WzRa8G8OwzRxiHQIFOjJBXw7O8b0qV50K2H1tWf+
riCiHRbNFIUucYgoZu/Yqlz44iiXzh3EpJuE0uLKl57lNc/93wVjOyYyApeguwpElTOf9HH1YkSU
e0O72cC/b1DMK9/PGP5c97zaUGwXg01cjHMxcRwz0Cf8ePkAJ47U0eRvSLehtYM06pw+1OTauZje
wBG7mCTJEDqX3eCjvOXqxQGmTwXUmwlxmmdrpw+z0ybiHXnbYqasvDgbcGPJEvvsHKFzDp96Tgz3
cvjwMM/efsaBwZP0D39KabKEpgnbG3/wrvaU5psnHD/6mMF8jcqWwRgwpWOjKiLkQkOhv5+xsTLl
cpnR0WOUSiVEhLVKhbXXa7xcXqHyaoV6o0Hqd1MxUjqu7XYLMFkaNXtXYC09+R5UwbkYEcVaizFm
P/LWGsLJydMs3VvCWkP3gzxK7OKu7Bl81/tEhKmpKVhYWNCJiQkNglDDMKdhLpf1/0AQhDo+Pq5z
c3NKmqa6uLios7MXtFgsahRFGhUKHUS7KBQ0iiIdGhrS8+dndH5+XpMk0X8AMTVx/inpU4cAAAAl
dEVYdGNyZWF0ZS1kYXRlADIwMTAtMTItMjZUMTQ6NDk6MjErMDk6MDAHHBB1AAAAJXRFWHRtb2Rp
ZnktZGF0ZQAyMDEwLTEyLTI2VDE0OjQ5OjIxKzA5OjAwWK1mQQAAAABJRU5ErkJggg==
'''.strip()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/db/remote/changes/changes.go
|
package changes
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
"github.com/charmbracelet/bubbles/progress"
"github.com/charmbracelet/bubbles/spinner"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/stdcopy"
pgx "github.com/jackc/pgx/v4"
"github.com/muesli/reflow/wrap"
"github.com/supabase/cli/internal/utils"
)
// TODO: Handle cleanup on SIGINT/SIGTERM.
func Run() error {
// Sanity checks.
{
if err := utils.AssertDockerIsRunning(); err != nil {
return err
}
if err := utils.LoadConfig(); err != nil {
return err
}
}
url := os.Getenv("SUPABASE_REMOTE_DB_URL")
if url == "" {
return errors.New("Remote database is not set. Run " + utils.Aqua("supabase db remote set") + " first.")
}
s := spinner.NewModel()
s.Spinner = spinner.Dot
s.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("205"))
p := tea.NewProgram(model{spinner: s})
errCh := make(chan error, 1)
go func() {
errCh <- run(p, url)
p.Send(tea.Quit())
}()
if err := p.Start(); err != nil {
return err
}
if errors.Is(ctx.Err(), context.Canceled) {
return errors.New("Aborted " + utils.Aqua("supabase db remote changes") + ".")
}
if err := <-errCh; err != nil {
return err
}
fmt.Println(diff)
return nil
}
const (
netId = "supabase_db_remote_changes_network"
dbId = "supabase_db_remote_changes_db"
differId = "supabase_db_remote_changes_differ"
)
var (
ctx, cancelCtx = context.WithCancel(context.Background())
diff string
)
func run(p *tea.Program, url string) error {
_, _ = utils.Docker.NetworkCreate(
ctx,
netId,
types.NetworkCreate{
CheckDuplicate: true,
Labels: map[string]string{
"com.supabase.cli.project": utils.ProjectId,
"com.docker.compose.project": utils.ProjectId,
},
},
)
defer utils.Docker.NetworkRemove(context.Background(), netId) //nolint:errcheck
defer utils.DockerRemoveAll()
conn, err := pgx.Connect(ctx, url)
if err != nil {
return err
}
defer conn.Close(context.Background())
p.Send(utils.StatusMsg("Pulling images..."))
// Pull images.
{
if _, _, err := utils.Docker.ImageInspectWithRaw(ctx, "docker.io/"+utils.DbImage); err != nil {
out, err := utils.Docker.ImagePull(
ctx,
"docker.io/"+utils.DbImage,
types.ImagePullOptions{},
)
if err != nil {
return err
}
if err := utils.ProcessPullOutput(out, p); err != nil {
return err
}
}
if _, _, err := utils.Docker.ImageInspectWithRaw(ctx, "docker.io/"+utils.DifferImage); err != nil {
out, err := utils.Docker.ImagePull(
ctx,
"docker.io/"+utils.DifferImage,
types.ImagePullOptions{},
)
if err != nil {
return err
}
if err := utils.ProcessPullOutput(out, p); err != nil {
return err
}
}
}
// 1. Assert `supabase/migrations` and `schema_migrations` are in sync.
if rows, err := conn.Query(ctx, "SELECT version FROM supabase_migrations.schema_migrations ORDER BY version"); err != nil {
return err
} else {
remoteMigrations := []string{}
for rows.Next() {
var version string
if err := rows.Scan(&version); err != nil {
return err
}
remoteMigrations = append(remoteMigrations, version)
}
localMigrations, err := os.ReadDir("supabase/migrations")
if err != nil {
return err
}
conflictErr := errors.New("The remote database's migration history is not in sync with the contents of " + utils.Bold("supabase/migrations") + `. Resolve this by:
- Updating the project from version control to get the latest ` + utils.Bold("supabase/migrations") + `,
- Pushing unapplied migrations with ` + utils.Aqua("supabase db push") + `,
- Or failing that, manually inserting/deleting rows from the supabase_migrations.schema_migrations table on the remote database.`)
if len(remoteMigrations) != len(localMigrations) {
return conflictErr
}
re := regexp.MustCompile(`([0-9]+)_.*\.sql`)
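		// e.g. a local file "20211208054321_add_users.sql" yields the
		// timestamp "20211208054321", which must equal the corresponding
		// schema_migrations row fetched from the remote above.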
for i, remoteTimestamp := range remoteMigrations {
localTimestamp := re.FindStringSubmatch(localMigrations[i].Name())[1]
if localTimestamp == remoteTimestamp {
continue
}
return conflictErr
}
}
// 2. Create shadow db and run migrations.
p.Send(utils.StatusMsg("Creating shadow database..."))
{
cmd := []string{}
if dbVersion, err := strconv.ParseUint(utils.DbVersion, 10, 64); err != nil {
return err
} else if dbVersion >= 140000 {
cmd = []string{"postgres", "-c", "config_file=/etc/postgresql/postgresql.conf"}
}
if _, err := utils.DockerRun(
ctx,
dbId,
&container.Config{
Image: utils.DbImage,
Env: []string{"POSTGRES_PASSWORD=postgres"},
Cmd: cmd,
Labels: map[string]string{
"com.supabase.cli.project": utils.ProjectId,
"com.docker.compose.project": utils.ProjectId,
},
},
&container.HostConfig{NetworkMode: netId},
); err != nil {
return err
}
out, err := utils.DockerExec(ctx, dbId, []string{
"sh", "-c", "until pg_isready --host $(hostname --ip-address); do sleep 0.1; done " +
`&& psql postgresql://postgres:postgres@localhost/postgres <<'EOSQL'
BEGIN;
` + utils.GlobalsSql + `
COMMIT;
EOSQL
`,
})
if err != nil {
return err
}
var errBuf bytes.Buffer
if _, err := stdcopy.StdCopy(io.Discard, &errBuf, out); err != nil {
return err
}
if errBuf.Len() > 0 {
return errors.New("Error starting shadow database: " + errBuf.String())
}
{
out, err := utils.DockerExec(ctx, dbId, []string{
"psql", "postgresql://postgres:postgres@localhost/postgres", "-c", utils.InitialSchemaSql,
})
if err != nil {
return err
}
var errBuf bytes.Buffer
if _, err := stdcopy.StdCopy(io.Discard, &errBuf, out); err != nil {
return err
}
if errBuf.Len() > 0 {
return errors.New("Error starting shadow database: " + errBuf.String())
}
}
{
extensionsSql, err := os.ReadFile("supabase/extensions.sql")
if errors.Is(err, os.ErrNotExist) {
// skip
} else if err != nil {
return err
} else {
out, err := utils.DockerExec(ctx, dbId, []string{
"psql", "postgresql://postgres:postgres@localhost/postgres", "-c", string(extensionsSql),
})
if err != nil {
return err
}
var errBuf bytes.Buffer
if _, err := stdcopy.StdCopy(io.Discard, &errBuf, out); err != nil {
return err
}
if errBuf.Len() > 0 {
return errors.New("Error starting shadow database: " + errBuf.String())
}
}
}
migrations, err := os.ReadDir("supabase/migrations")
if err != nil {
return err
}
for i, migration := range migrations {
// NOTE: To handle backward-compatibility. `<timestamp>_init.sql` as
// the first migration (prev versions of the CLI) is deprecated.
if i == 0 {
matches := regexp.MustCompile(`([0-9]{14})_init\.sql`).FindStringSubmatch(migration.Name())
if len(matches) == 2 {
if timestamp, err := strconv.ParseUint(matches[1], 10, 64); err != nil {
return err
} else if timestamp < 20211209000000 {
continue
}
}
}
p.Send(utils.StatusMsg("Applying migration " + utils.Bold(migration.Name()) + "..."))
content, err := os.ReadFile("supabase/migrations/" + migration.Name())
if err != nil {
return err
}
out, err := utils.DockerExec(ctx, dbId, []string{
"psql", "postgresql://postgres:postgres@localhost/postgres", "-c", string(content),
})
if err != nil {
return err
}
var errBuf bytes.Buffer
if _, err := stdcopy.StdCopy(io.Discard, &errBuf, out); err != nil {
return err
}
if errBuf.Len() > 0 {
return errors.New("Error starting shadow database: " + errBuf.String())
}
}
}
// 3. Diff remote db (source) & shadow db (target) and print it.
{
p.Send(utils.StatusMsg("Generating changes on the remote database since the last migration..."))
out, err := utils.DockerRun(
ctx,
differId,
&container.Config{
Image: utils.DifferImage,
Entrypoint: []string{
"sh", "-c", "/venv/bin/python3 -u cli.py --json-diff" +
" '" + url + "'" +
" 'postgresql://postgres:postgres@" + dbId + ":5432/postgres'",
},
Labels: map[string]string{
"com.supabase.cli.project": utils.ProjectId,
"com.docker.compose.project": utils.ProjectId,
},
},
&container.HostConfig{NetworkMode: container.NetworkMode(netId)},
)
if err != nil {
return err
}
diffBytes, err := utils.ProcessDiffOutput(p, out)
if err != nil {
return err
}
diff = string(diffBytes)
}
return nil
}
type model struct {
spinner spinner.Model
status string
progress *progress.Model
psqlOutputs []string
width int
}
func (m model) Init() tea.Cmd {
return spinner.Tick
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.Type {
case tea.KeyCtrlC:
// Stop future runs
cancelCtx()
// Stop current runs
utils.DockerRemoveAll()
return m, tea.Quit
default:
return m, nil
}
case tea.WindowSizeMsg:
m.width = msg.Width
return m, nil
case spinner.TickMsg:
spinnerModel, cmd := m.spinner.Update(msg)
m.spinner = spinnerModel
return m, cmd
case progress.FrameMsg:
if m.progress == nil {
return m, nil
}
tmp, cmd := m.progress.Update(msg)
progressModel := tmp.(progress.Model)
m.progress = &progressModel
return m, cmd
case utils.StatusMsg:
m.status = string(msg)
return m, nil
case utils.ProgressMsg:
if msg == nil {
m.progress = nil
return m, nil
}
if m.progress == nil {
progressModel := progress.NewModel(progress.WithDefaultGradient())
m.progress = &progressModel
}
return m, m.progress.SetPercent(*msg)
case utils.PsqlMsg:
if msg == nil {
m.psqlOutputs = []string{}
return m, nil
}
m.psqlOutputs = append(m.psqlOutputs, *msg)
if len(m.psqlOutputs) > 5 {
m.psqlOutputs = m.psqlOutputs[1:]
}
return m, nil
default:
return m, nil
}
}
func (m model) View() string {
var progress string
if m.progress != nil {
progress = "\n\n" + m.progress.View()
}
var psqlOutputs string
if len(m.psqlOutputs) > 0 {
psqlOutputs = "\n\n" + strings.Join(m.psqlOutputs, "\n")
}
return wrap.String(m.spinner.View()+m.status+progress+psqlOutputs, m.width)
}
|
[
"\"SUPABASE_REMOTE_DB_URL\""
] |
[] |
[
"SUPABASE_REMOTE_DB_URL"
] |
[]
|
["SUPABASE_REMOTE_DB_URL"]
|
go
| 1 | 0 | |
agent/wsclient/client.go
|
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package wsclient wraps the generated aws-sdk-go client to provide marshalling
// and unmarshalling of data over a websocket connection in the format expected
// by backend. It allows for bidirectional communication and acts as both a
// client-and-server in terms of requests, but only as a client in terms of
// connecting.
package wsclient
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"reflect"
"strings"
"sync"
"time"
"crypto/tls"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/cipher"
"github.com/aws/amazon-ecs-agent/agent/wsclient/wsconn"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
const (
// ServiceName defines the service name for the agent. This is used to sign messages
// that are sent to the backend.
ServiceName = "ecs"
// wsConnectTimeout specifies the default connection timeout to the backend.
wsConnectTimeout = 30 * time.Second
// wsHandshakeTimeout specifies the default handshake timeout for the websocket client
wsHandshakeTimeout = wsConnectTimeout
// readBufSize is the size of the read buffer for the ws connection.
readBufSize = 4096
// writeBufSize is the size of the write buffer for the ws connection.
writeBufSize = 32768
// Default NO_PROXY env var IP addresses
defaultNoProxyIP = "169.254.169.254,169.254.170.2"
errClosed = "use of closed network connection"
)
// ReceivedMessage is the intermediate message used to unmarshal a
// message from backend
type ReceivedMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestMessage is the intermediate message marshalled to send to backend.
type RequestMessage struct {
Type string `json:"type"`
Message json.RawMessage `json:"message"`
}
// RequestHandler would be func(*ecsacs.T for T in ecsacs.*) to be more proper, but it needs
// to be interface{} to properly capture that
type RequestHandler interface{}
// ClientServer is a combined client and server for the backend websocket connection
type ClientServer interface {
AddRequestHandler(RequestHandler)
// SetAnyRequestHandler takes a function with the signature 'func(i
// interface{})' and calls it with every message the server passes down.
// Only a single 'AnyRequestHandler' will be active at a given time for a
// ClientServer
SetAnyRequestHandler(RequestHandler)
MakeRequest(input interface{}) error
WriteMessage(input []byte) error
Connect() error
IsConnected() bool
SetConnection(conn wsconn.WebsocketConn)
Disconnect(...interface{}) error
Serve() error
SetReadDeadline(t time.Time) error
io.Closer
}
// ClientServerImpl wraps commonly used methods defined in ClientServer interface.
type ClientServerImpl struct {
// AgentConfig is the user-specified runtime configuration
AgentConfig *config.Config
// conn holds the underlying low-level websocket connection
conn wsconn.WebsocketConn
// CredentialProvider is used to retrieve AWS credentials
CredentialProvider *credentials.Credentials
// RequestHandlers is a map from message types to handler functions of the
// form:
// "FooMessage": func(message *ecsacs.FooMessage)
RequestHandlers map[string]RequestHandler
// AnyRequestHandler is a request handler that, if set, is called on every
// message with said message. It will be called before a RequestHandler is
// called. It must take a single interface{} argument.
AnyRequestHandler RequestHandler
// MakeRequestHook is an optional callback that, if set, is called on every
// generated request with the raw request body.
MakeRequestHook MakeRequestHookFunc
// URL is the full url to the backend, including path, querystring, and so on.
URL string
// RWTimeout is the duration used for setting read and write deadlines
// for the websocket connection
RWTimeout time.Duration
// writeLock needed to ensure that only one routine is writing to the socket
writeLock sync.RWMutex
ClientServer
ServiceError
TypeDecoder
}
// MakeRequestHookFunc is a function that is invoked on every generated request
// with the raw request body. MakeRequestHookFunc must return either the body
// to send or an error.
type MakeRequestHookFunc func([]byte) ([]byte, error)
// Connect opens a connection to the backend and upgrades it to a websocket. Calls to
// 'MakeRequest' can be made after calling this, but responses will not be
// receivable until 'Serve' is also called.
func (cs *ClientServerImpl) Connect() error {
seelog.Infof("Establishing a Websocket connection to %s", cs.URL)
parsedURL, err := url.Parse(cs.URL)
if err != nil {
return err
}
wsScheme, err := websocketScheme(parsedURL.Scheme)
if err != nil {
return err
}
parsedURL.Scheme = wsScheme
// NewRequest never returns an error if the url parses and we just verified
// it did above
request, _ := http.NewRequest("GET", parsedURL.String(), nil)
// Sign the request; we'll send its headers via the websocket client which includes the signature
err = utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, ServiceName, cs.CredentialProvider, nil)
if err != nil {
return err
}
timeoutDialer := &net.Dialer{Timeout: wsConnectTimeout}
tlsConfig := &tls.Config{ServerName: parsedURL.Host, InsecureSkipVerify: cs.AgentConfig.AcceptInsecureCert}
cipher.WithSupportedCipherSuites(tlsConfig)
// Ensure that NO_PROXY gets set
noProxy := os.Getenv("NO_PROXY")
if noProxy == "" {
dockerHost, err := url.Parse(cs.AgentConfig.DockerEndpoint)
if err == nil {
dockerHost.Scheme = ""
os.Setenv("NO_PROXY", fmt.Sprintf("%s,%s", defaultNoProxyIP, dockerHost.String()))
seelog.Info("NO_PROXY set:", os.Getenv("NO_PROXY"))
} else {
seelog.Errorf("NO_PROXY unable to be set: the configured Docker endpoint is invalid.")
}
}
dialer := websocket.Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
NetDial: timeoutDialer.Dial,
HandshakeTimeout: wsHandshakeTimeout,
}
websocketConn, httpResponse, err := dialer.Dial(parsedURL.String(), request.Header)
if httpResponse != nil {
defer httpResponse.Body.Close()
}
if err != nil {
var resp []byte
if httpResponse != nil {
var readErr error
resp, readErr = ioutil.ReadAll(httpResponse.Body)
if readErr != nil {
return fmt.Errorf("Unable to read websocket connection: " + readErr.Error() + ", " + err.Error())
}
// If there's a response, we can try to unmarshal it into one of the
// modeled error types
possibleError, _, decodeErr := DecodeData(resp, cs.TypeDecoder)
if decodeErr == nil {
return cs.NewError(possibleError)
}
}
seelog.Warnf("Error creating a websocket client: %v", err)
return errors.Wrapf(err, "websocket client: unable to dial %s response: %s",
parsedURL.Host, string(resp))
}
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
cs.conn = websocketConn
seelog.Debugf("Established a Websocket connection to %s", cs.URL)
return nil
}
// IsReady gives a boolean response that informs the caller if the websocket
// connection is fully established.
func (cs *ClientServerImpl) IsReady() bool {
cs.writeLock.RLock()
defer cs.writeLock.RUnlock()
return cs.conn != nil
}
// SetConnection passes a websocket connection object into the client. This is used only in
// testing and should be avoided in non-test code.
func (cs *ClientServerImpl) SetConnection(conn wsconn.WebsocketConn) {
cs.conn = conn
}
// SetReadDeadline sets the read deadline for the websocket connection
// A read timeout results in an io error if there are any outstanding reads
// that exceed the deadline
func (cs *ClientServerImpl) SetReadDeadline(t time.Time) error {
err := cs.conn.SetReadDeadline(t)
if err == nil {
return nil
}
seelog.Warnf("Unable to set read deadline for websocket connection: %v for %s", err, cs.URL)
// If we get connection closed error from SetReadDeadline, break out of the for loop and
// return an error
if opErr, ok := err.(*net.OpError); ok && strings.Contains(opErr.Err.Error(), errClosed) {
seelog.Errorf("Stopping redundant reads on closed network connection: %s", cs.URL)
return opErr
}
// An unhandled error has occurred while trying to extend read deadline.
// Try asynchronously closing the connection. We don't want to be blocked on stale connections
// taking too long to close. The flip side is that we might start accumulating stale connections.
// But, that still seems more desirable than waiting for ever for the connection to close
cs.forceCloseConnection()
return err
}
func (cs *ClientServerImpl) forceCloseConnection() {
closeChan := make(chan error)
go func() {
closeChan <- cs.Close()
}()
ctx, cancel := context.WithTimeout(context.TODO(), wsConnectTimeout)
defer cancel()
select {
case closeErr := <-closeChan:
if closeErr != nil {
seelog.Warnf("Unable to close websocket connection: %v for %s",
closeErr, cs.URL)
}
case <-ctx.Done():
if ctx.Err() != nil {
seelog.Warnf("Context canceled waiting for termination of websocket connection: %v for %s",
ctx.Err(), cs.URL)
}
}
}
// Disconnect disconnects the connection
func (cs *ClientServerImpl) Disconnect(...interface{}) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
if cs.conn == nil {
return fmt.Errorf("websocker client: no connection to close")
}
// Close() in turn results in a an internal flushFrame() call in gorilla
// as the close frame needs to be sent to the server. Set the deadline
// for that as well.
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.Close()
}
// AddRequestHandler adds a request handler to this client.
// A request handler *must* be a function taking a single argument, and that
// argument *must* be a pointer to a recognized 'ecsacs' struct.
// E.g. if you want to handle messages from acs of type 'FooMessage', you
// would pass the following handler in:
// func(message *ecsacs.FooMessage)
// This function will panic if the passed in function does not have one pointer
// argument or the argument is not a recognized type.
// Additionally, the request handler will block processing of further messages
// on this connection so it's important that it return quickly.
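// For illustration (sketch; 'ecsacs.FooMessage' stands in for any recognized
// message type), a registration would look like:
//
//	cs.AddRequestHandler(func(message *ecsacs.FooMessage) {
//		seelog.Infof("handled FooMessage: %v", message)
//	})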
func (cs *ClientServerImpl) AddRequestHandler(f RequestHandler) {
firstArg := reflect.TypeOf(f).In(0)
firstArgTypeStr := firstArg.Elem().Name()
recognizedTypes := cs.GetRecognizedTypes()
_, ok := recognizedTypes[firstArgTypeStr]
if !ok {
panic("AddRequestHandler called with invalid function; argument type not recognized: " + firstArgTypeStr)
}
cs.RequestHandlers[firstArgTypeStr] = f
}
// SetAnyRequestHandler passes a RequestHandler object into the client.
func (cs *ClientServerImpl) SetAnyRequestHandler(f RequestHandler) {
cs.AnyRequestHandler = f
}
// MakeRequest makes a request using the given input. Note, the input *MUST* be
// a pointer to a valid backend type that this client recognises
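// For example (sketch; the AckRequest field name is inferred from the wire
// format shown inside this method and is an assumption about the generated
// ecsacs types):
//
//	err := cs.MakeRequest(&ecsacs.AckRequest{MessageId: aws.String("xyz")})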
func (cs *ClientServerImpl) MakeRequest(input interface{}) error {
send, err := cs.CreateRequestMessage(input)
if err != nil {
return err
}
if cs.MakeRequestHook != nil {
send, err = cs.MakeRequestHook(send)
if err != nil {
return err
}
}
// Over the wire we send something like
// {"type":"AckRequest","message":{"messageId":"xyz"}}
return cs.WriteMessage(send)
}
// WriteMessage wraps the low level websocket write method with a lock
func (cs *ClientServerImpl) WriteMessage(send []byte) error {
cs.writeLock.Lock()
defer cs.writeLock.Unlock()
// This is just future proofing. Ignore the error as the gorilla websocket
// library returns 'nil' anyway for SetWriteDeadline
// https://github.com/gorilla/websocket/blob/4201258b820c74ac8e6922fc9e6b52f71fe46f8d/conn.go#L761
if err := cs.conn.SetWriteDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
seelog.Warnf("Unable to set write deadline for websocket connection: %v for %s", err, cs.URL)
}
return cs.conn.WriteMessage(websocket.TextMessage, send)
}
// ConsumeMessages reads messages from the websocket connection and
// dispatches them to the registered request handlers.
func (cs *ClientServerImpl) ConsumeMessages() error {
for {
if err := cs.SetReadDeadline(time.Now().Add(cs.RWTimeout)); err != nil {
return err
}
messageType, message, err := cs.conn.ReadMessage()
switch {
case err == nil:
if messageType != websocket.TextMessage {
// maybe not fatal though, we'll try to process it anyway
seelog.Errorf("Unexpected messageType: %v", messageType)
}
cs.handleMessage(message)
case permissibleCloseCode(err):
seelog.Debugf("Connection closed for a valid reason: %s", err)
return io.EOF
default:
// Unexpected error occurred
seelog.Errorf("Error getting message from ws backend: error: [%v], messageType: [%v] ",
err, messageType)
return err
}
}
}
// CreateRequestMessage creates the request json message using the given input.
// Note, the input *MUST* be a pointer to a valid backend type that this
// client recognises.
func (cs *ClientServerImpl) CreateRequestMessage(input interface{}) ([]byte, error) {
msg := &RequestMessage{}
recognizedTypes := cs.GetRecognizedTypes()
for typeStr, typeVal := range recognizedTypes {
if reflect.TypeOf(input) == reflect.PtrTo(typeVal) {
msg.Type = typeStr
break
}
}
if msg.Type == "" {
return nil, &UnrecognizedWSRequestType{reflect.TypeOf(input).String()}
}
messageData, err := jsonutil.BuildJSON(input)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
msg.Message = json.RawMessage(messageData)
send, err := json.Marshal(msg)
if err != nil {
return nil, &NotMarshallableWSRequest{msg.Type, err}
}
return send, nil
}
// handleMessage dispatches a message to the correct 'requestHandler' for its
// type. If no request handler is found, the message is discarded.
func (cs *ClientServerImpl) handleMessage(data []byte) {
typedMessage, typeStr, err := DecodeData(data, cs.TypeDecoder)
if err != nil {
seelog.Warnf("Unable to handle message from backend: %v", err)
return
}
seelog.Debugf("Received message of type: %s", typeStr)
if cs.AnyRequestHandler != nil {
reflect.ValueOf(cs.AnyRequestHandler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
}
if handler, ok := cs.RequestHandlers[typeStr]; ok {
reflect.ValueOf(handler).Call([]reflect.Value{reflect.ValueOf(typedMessage)})
} else {
seelog.Infof("No handler for message type: %s %s", typeStr, typedMessage)
}
}
func websocketScheme(httpScheme string) (string, error) {
// gorilla/websocket expects the websocket scheme (ws[s]://)
var wsScheme string
switch httpScheme {
case "http":
wsScheme = "ws"
case "https":
wsScheme = "wss"
default:
return "", fmt.Errorf("wsclient: unknown scheme %s", httpScheme)
}
return wsScheme, nil
}
// See https://github.com/gorilla/websocket/blob/87f6f6a22ebfbc3f89b9ccdc7fddd1b914c095f9/conn.go#L650
func permissibleCloseCode(err error) bool {
return websocket.IsCloseError(err,
websocket.CloseNormalClosure, // websocket error code 1000
websocket.CloseAbnormalClosure, // websocket error code 1006
websocket.CloseGoingAway, // websocket error code 1001
websocket.CloseInternalServerErr) // websocket error code 1011
}
|
[
"\"NO_PROXY\"",
"\"NO_PROXY\""
] |
[] |
[
"NO_PROXY"
] |
[]
|
["NO_PROXY"]
|
go
| 1 | 0 | |
test/integration/master/master_test.go
|
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/ghodss/yaml"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
func testPrefix(t *testing.T, prefix string) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + prefix)
if err != nil {
t.Fatalf("unexpected error getting %s prefix: %v", prefix, err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
}
func TestAutoscalingPrefix(t *testing.T) {
testPrefix(t, "/apis/autoscaling/")
}
func TestBatchPrefix(t *testing.T) {
testPrefix(t, "/apis/batch/")
}
func TestAppsPrefix(t *testing.T) {
testPrefix(t, "/apis/apps/")
}
func TestExtensionsPrefix(t *testing.T) {
testPrefix(t, "/apis/extensions/")
}
func TestEmptyList(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
u := s.URL + "/api/v1/namespaces/default/pods"
resp, err := http.Get(u)
if err != nil {
t.Fatalf("unexpected error getting %s: %v", u, err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
defer resp.Body.Close()
data, _ := ioutil.ReadAll(resp.Body)
decodedData := map[string]interface{}{}
if err := json.Unmarshal(data, &decodedData); err != nil {
t.Logf("body: %s", string(data))
t.Fatalf("got error decoding data: %v", err)
}
if items, ok := decodedData["items"]; !ok {
t.Logf("body: %s", string(data))
t.Fatalf("missing items field in empty list (all lists should return an items field)")
} else if items == nil {
t.Logf("body: %s", string(data))
t.Fatalf("nil items field from empty list (all lists should return non-nil empty items lists)")
}
}
func TestWatchSucceedsWithoutArgs(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + "/api/v1/namespaces?watch=1")
if err != nil {
t.Fatalf("unexpected error getting experimental prefix: %v", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
resp.Body.Close()
}
var hpaV1 string = `
{
"apiVersion": "autoscaling/v1",
"kind": "HorizontalPodAutoscaler",
"metadata": {
"name": "test-hpa",
"namespace": "default"
},
"spec": {
"scaleTargetRef": {
"kind": "ReplicationController",
"name": "test-hpa",
"namespace": "default"
},
"minReplicas": 1,
"maxReplicas": 10,
"targetCPUUtilizationPercentage": 50
}
}
`
func autoscalingPath(resource, namespace, name string) string {
return testapi.Autoscaling.ResourcePath(resource, namespace, name)
}
func batchPath(resource, namespace, name string) string {
return testapi.Batch.ResourcePath(resource, namespace, name)
}
func extensionsPath(resource, namespace, name string) string {
return testapi.Extensions.ResourcePath(resource, namespace, name)
}
func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
transport := http.DefaultTransport
requests := []struct {
verb string
URL string
body string
expectedStatusCodes map[int]bool
expectedVersion string
}{
{"POST", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
{"GET", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
{"GET", extensionsPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
}
for _, r := range requests {
bodyBytes := bytes.NewReader([]byte(r.body))
req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
func() {
resp, err := transport.RoundTrip(req)
defer resp.Body.Close()
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
b, _ := ioutil.ReadAll(resp.Body)
body := string(b)
if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
t.Logf("case %v", r)
t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
t.Errorf("Body: %v", body)
}
if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
t.Logf("case %v", r)
t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
}
}()
}
}
func TestAccept(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + "/api/")
if err != nil {
t.Fatalf("unexpected error getting api: %v", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
body, _ := ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/json" {
t.Errorf("unexpected content: %s", body)
}
if err := json.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application/yaml")
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
body, _ = ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/yaml" {
t.Errorf("unexpected content: %s", body)
}
t.Logf("body: %s", body)
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application/json, application/yaml")
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
body, _ = ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/json" {
t.Errorf("unexpected content: %s", body)
}
t.Logf("body: %s", body)
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application") // not a valid media type
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusNotAcceptable {
t.Errorf("unexpected error from the server")
}
}
func countEndpoints(eps *api.Endpoints) int {
count := 0
for i := range eps.Subsets {
count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
}
return count
}
func TestMasterService(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
return false, nil
}
found := false
for i := range svcList.Items {
if svcList.Items[i].Name == "kubernetes" {
found = true
break
}
}
if found {
ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
return false, nil
}
if countEndpoints(ep) == 0 {
return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
}
return true, nil
}
return false, nil
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func TestServiceAlloc(t *testing.T) {
cfg := framework.NewIntegrationTestMasterConfig()
_, cidr, err := net.ParseCIDR("192.168.0.0/29")
if err != nil {
t.Fatalf("bad cidr: %v", err)
}
cfg.ServiceIPRange = *cidr
_, s := framework.RunAMaster(cfg)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
svc := func(i int) *api.Service {
return &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("svc-%v", i),
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{
{Port: 80},
},
},
}
}
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return false, err
}
return !errors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("creating kubernetes service timed out")
}
// make 5 more services to take up all IPs
for i := 0; i < 5; i++ {
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(i)); err != nil {
t.Error(err)
}
}
// Make another service. It will fail because we're out of cluster IPs
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
if !strings.Contains(err.Error(), "range is full") {
t.Errorf("unexpected error text: %v", err)
}
} else {
svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected success, and error getting the services: %v", err)
}
allIPs := []string{}
for _, s := range svcs.Items {
allIPs = append(allIPs, s.Spec.ClusterIP)
}
t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 2 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
}
// Delete the first service.
if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// This time creating the second service should work.
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
}
// TestUpdateNodeObjects represents a simple version of the behavior of node checkins at steady
// state. This test allows for easy profiling of a realistic master scenario for baseline CPU
// in very large clusters. It is disabled by default - start a kube-apiserver and pass
// UPDATE_NODE_APISERVER as the host value.
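// A typical invocation looks like this (sketch; the host value is illustrative):
//
//	UPDATE_NODE_APISERVER=http://127.0.0.1:8080 go test -run TestUpdateNodeObjects ./test/integration/master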
func TestUpdateNodeObjects(t *testing.T) {
server := os.Getenv("UPDATE_NODE_APISERVER")
if len(server) == 0 {
t.Skip("UPDATE_NODE_APISERVER is not set")
}
c := clienttypedv1.NewForConfigOrDie(&restclient.Config{
QPS: 10000,
Host: server,
ContentConfig: restclient.ContentConfig{
AcceptContentTypes: "application/vnd.kubernetes.protobuf",
ContentType: "application/vnd.kubernetes.protobuf",
},
})
nodes := 400
listers := 5
watchers := 50
iterations := 10000
for i := 0; i < nodes*6; i++ {
c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
_, err := c.Nodes().Create(&v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("node-%d", i),
},
})
if err != nil {
t.Fatal(err)
}
}
for k := 0; k < listers; k++ {
go func(lister int) {
for i := 0; i < iterations; i++ {
_, err := c.Nodes().List(v1.ListOptions{})
if err != nil {
fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err)
break
}
time.Sleep(time.Duration(lister)*10*time.Millisecond + 1500*time.Millisecond)
}
}(k)
}
for k := 0; k < watchers; k++ {
go func(lister int) {
w, err := c.Nodes().Watch(v1.ListOptions{})
if err != nil {
fmt.Printf("[watch:%d] error: %v", k, err)
return
}
i := 0
for r := range w.ResultChan() {
i++
if _, ok := r.Object.(*v1.Node); !ok {
fmt.Printf("[watch:%d] unexpected object after %d: %#v\n", lister, i, r)
}
if i%100 == 0 {
fmt.Printf("[watch:%d] iteration %d ...\n", lister, i)
}
}
fmt.Printf("[watch:%d] done\n", lister)
}(k)
}
var wg sync.WaitGroup
wg.Add(nodes - listers)
for j := 0; j < nodes; j++ {
go func(node int) {
var lastCount int
for i := 0; i < iterations; i++ {
if i%100 == 0 {
fmt.Printf("[%d] iteration %d ...\n", node, i)
}
if i%20 == 0 {
_, err := c.Nodes().List(v1.ListOptions{})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
}
r, err := c.Nodes().List(v1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=node-%d", node),
ResourceVersion: "0",
})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
if len(r.Items) != 1 {
fmt.Printf("[%d] error after %d: unexpected list count\n", node, i)
break
}
n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
if len(n.Status.Conditions) != lastCount {
fmt.Printf("[%d] worker set %d, read %d conditions\n", node, lastCount, len(n.Status.Conditions))
break
}
previousCount := lastCount
switch {
case i%4 == 0:
lastCount = 1
n.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "foo",
},
}
case i%4 == 1:
lastCount = 2
n.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "foo",
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
Reason: "bar",
},
}
case i%4 == 2:
lastCount = 0
n.Status.Conditions = nil
}
if _, err := c.Nodes().UpdateStatus(n); err != nil {
if !errors.IsConflict(err) {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
lastCount = previousCount
}
}
wg.Done()
fmt.Printf("[%d] done\n", node)
}(j)
}
wg.Wait()
}
|
[
"\"UPDATE_NODE_APISERVER\""
] |
[] |
[
"UPDATE_NODE_APISERVER"
] |
[]
|
["UPDATE_NODE_APISERVER"]
|
go
| 1 | 0 | |
vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/provider/provider.go
|
package provider
import (
"fmt"
"log"
"os"
"strings"
"github.com/hashicorp/go-azure-helpers/authentication"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
)
func AzureProvider() terraform.ResourceProvider {
// avoids this showing up in test output
var debugLog = func(f string, v ...interface{}) {
if os.Getenv("TF_LOG") == "" {
return
}
if os.Getenv("TF_ACC") != "" {
return
}
log.Printf(f, v...)
}
dataSources := make(map[string]*schema.Resource)
resources := make(map[string]*schema.Resource)
for _, service := range SupportedServices() {
debugLog("[DEBUG] Registering Data Sources for %q..", service.Name())
for k, v := range service.SupportedDataSources() {
if existing := dataSources[k]; existing != nil {
panic(fmt.Sprintf("An existing Data Source exists for %q", k))
}
dataSources[k] = v
}
debugLog("[DEBUG] Registering Resources for %q..", service.Name())
for k, v := range service.SupportedResources() {
if existing := resources[k]; existing != nil {
panic(fmt.Sprintf("An existing Resource exists for %q", k))
}
resources[k] = v
}
}
// TODO: remove all of this in 2.0 once Custom Timeouts are supported
if !features.SupportsCustomTimeouts() {
// ensure any timeouts configured on the resources are removed until 2.0
for _, v := range resources {
v.Timeouts = nil
}
}
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"subscription_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""),
Description: "The Subscription ID which should be used.",
},
"client_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
Description: "The Client ID which should be used.",
},
"tenant_id": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
Description: "The Tenant ID which should be used.",
},
"auxiliary_tenant_ids": {
Type: schema.TypeList,
Optional: true,
MaxItems: 3,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"environment": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"),
Description: "The Cloud Environment which should be used. Possible values are public, usgovernment, german, and china. Defaults to public.",
},
// Client Certificate specific fields
"client_certificate_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
Description: "The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service Principal using a Client Certificate.",
},
"client_certificate_password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
Description: "The password associated with the Client Certificate. For use when authenticating as a Service Principal using a Client Certificate",
},
// Client Secret specific fields
"client_secret": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
Description: "The Client Secret which should be used. For use When authenticating as a Service Principal using a Client Secret.",
},
// Managed Service Identity specific fields
"use_msi": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
Description: "Allowed Managed Service Identity be used for Authentication.",
},
"msi_endpoint": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
Description: "The path to a custom endpoint for Managed Service Identity - in most circumstances this should be detected automatically. ",
},
// Managed Tracking GUID for User-agent
"partner_id": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.Any(validation.IsUUID, validation.StringIsEmpty),
DefaultFunc: schema.EnvDefaultFunc("ARM_PARTNER_ID", ""),
Description: "A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.",
},
"disable_correlation_request_id": {
Type: schema.TypeBool,
Optional: true,
// TODO: add an ARM_ prefix in 2.0
DefaultFunc: schema.EnvDefaultFunc("DISABLE_CORRELATION_REQUEST_ID", false),
Description: "This will disable the x-ms-correlation-request-id header.",
},
"disable_terraform_partner_id": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_DISABLE_TERRAFORM_PARTNER_ID", false),
Description: "This will disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.",
},
"features": schemaFeatures(),
// Advanced feature flags
"skip_credentials_validation": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_CREDENTIALS_VALIDATION", false),
Description: "This will cause the AzureRM Provider to skip verifying the credentials being used are valid.",
},
"skip_provider_registration": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_SKIP_PROVIDER_REGISTRATION", false),
Description: "Should the AzureRM Provider skip registering all of the Resource Providers that it supports, if they're not already registered?",
},
"storage_use_azuread": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ARM_STORAGE_USE_AZUREAD", false),
Description: "Should the AzureRM Provider use AzureAD to access the Storage Data Plane API's?",
},
},
DataSourcesMap: dataSources,
ResourcesMap: resources,
}
p.ConfigureFunc = providerConfigure(p)
return p
}
func providerConfigure(p *schema.Provider) schema.ConfigureFunc {
return func(d *schema.ResourceData) (interface{}, error) {
var auxTenants []string
if v, ok := d.Get("auxiliary_tenant_ids").([]interface{}); ok && len(v) > 0 {
auxTenants = *utils.ExpandStringSlice(v)
} else {
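// ARM_AUXILIARY_TENANT_IDS is read as a semicolon-separated list, e.g.
// ARM_AUXILIARY_TENANT_IDS="tenant-a;tenant-b" (illustrative values).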
if v := os.Getenv("ARM_AUXILIARY_TENANT_IDS"); v != "" {
auxTenants = strings.Split(v, ";")
}
}
if len(auxTenants) > 3 {
return nil, fmt.Errorf("The provider only supports 3 auxiliary tenant IDs")
}
builder := &authentication.Builder{
SubscriptionID: d.Get("subscription_id").(string),
ClientID: d.Get("client_id").(string),
ClientSecret: d.Get("client_secret").(string),
TenantID: d.Get("tenant_id").(string),
AuxiliaryTenantIDs: auxTenants,
Environment: d.Get("environment").(string),
MsiEndpoint: d.Get("msi_endpoint").(string),
ClientCertPassword: d.Get("client_certificate_password").(string),
ClientCertPath: d.Get("client_certificate_path").(string),
// Feature Toggles
SupportsClientCertAuth: true,
SupportsClientSecretAuth: true,
SupportsManagedServiceIdentity: d.Get("use_msi").(bool),
SupportsAzureCliToken: true,
SupportsAuxiliaryTenants: len(auxTenants) > 0,
// Doc Links
ClientSecretDocsLink: "https://www.terraform.io/docs/providers/azurerm/guides/service_principal_client_secret.html",
}
config, err := builder.Build()
if err != nil {
return nil, fmt.Errorf("Error building AzureRM Client: %s", err)
}
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
skipProviderRegistration := d.Get("skip_provider_registration").(bool)
clientBuilder := clients.ClientBuilder{
AuthConfig: config,
SkipProviderRegistration: skipProviderRegistration,
TerraformVersion: terraformVersion,
PartnerId: d.Get("partner_id").(string),
DisableCorrelationRequestID: d.Get("disable_correlation_request_id").(bool),
DisableTerraformPartnerID: d.Get("disable_terraform_partner_id").(bool),
Features: expandFeatures(d.Get("features").([]interface{})),
StorageUseAzureAD: d.Get("storage_use_azuread").(bool),
}
client, err := clients.Build(p.StopContext(), clientBuilder)
if err != nil {
return nil, err
}
client.StopContext = p.StopContext()
// replaces the context between tests
p.MetaReset = func() error {
client.StopContext = p.StopContext()
return nil
}
skipCredentialsValidation := d.Get("skip_credentials_validation").(bool)
if !skipCredentialsValidation {
// List all the available providers and their registration state to avoid unnecessary
// requests. This also lets us check if the provider credentials are correct.
ctx := client.StopContext
providerList, err := client.Resource.ProvidersClient.List(ctx, nil, "")
if err != nil {
return nil, fmt.Errorf("Unable to list provider registration status, it is possible that this is due to invalid "+
"credentials or the service principal does not have permission to use the Resource Manager API, Azure "+
"error: %s", err)
}
if !skipProviderRegistration {
availableResourceProviders := providerList.Values()
requiredResourceProviders := RequiredResourceProviders()
err := EnsureResourceProvidersAreRegistered(ctx, *client.Resource.ProvidersClient, availableResourceProviders, requiredResourceProviders)
if err != nil {
return nil, fmt.Errorf(resourceProviderRegistrationErrorFmt, err)
}
}
}
return client, nil
}
}
const resourceProviderRegistrationErrorFmt = `Error ensuring Resource Providers are registered.
Terraform automatically attempts to register the Resource Providers it supports to
ensure it's able to provision resources.
If you don't have permission to register Resource Providers you may wish to use the
"skip_provider_registration" flag in the Provider block to disable this functionality.
Please note that if you opt out of Resource Provider Registration and Terraform tries
to provision a resource from a Resource Provider which is unregistered, then the errors
may appear misleading - for example:
> API version 2019-XX-XX was not found for Microsoft.Foo
Could indicate either that the Resource Provider "Microsoft.Foo" requires registration,
but this could also indicate that this Azure Region doesn't support this API version.
More information on the "skip_provider_registration" flag can be found here:
https://www.terraform.io/docs/providers/azurerm/index.html#skip_provider_registration
Original Error: %s`
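// For reference, opting out of automatic registration is a single provider
// argument (HCL sketch; all other provider arguments omitted):
//
//	provider "azurerm" {
//	  skip_provider_registration = true
//	}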
|
[
"\"TF_LOG\"",
"\"TF_ACC\"",
"\"ARM_AUXILIARY_TENANT_IDS\""
] |
[] |
[
"TF_LOG",
"ARM_AUXILIARY_TENANT_IDS",
"TF_ACC"
] |
[]
|
["TF_LOG", "ARM_AUXILIARY_TENANT_IDS", "TF_ACC"]
|
go
| 3 | 0 | |
msg-extractor.py
|
import re
import extract_msg
inputmail = input("Name of the msg file: ")
mail = inputmail + r'.msg'
msg = extract_msg.Message(mail)
msg_sender = msg.sender
msg_date = msg.date
msg_subj = msg.subject
msg_message = msg.body
result= r'Results_' + mail + r'.txt'
# Raw string keeps the \( and \) escapes from being treated as invalid string escapes.
regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
f = open(result,'w')
f.write('Results from .msg uploaded: \n \n')
f.write('Sender: {}\n'.format(msg_sender))
f.write('Sent On: {}\n'.format(msg_date))
f.write('Subject: {}\n'.format(msg_subj))
f.write('\nLinks detected >>>>>>>>>>>>>>>>>>>>>>>>> \n')
match = re.findall(regex, msg_message)
for m in match:
f.write('<{}\n'.format(m))
# Close the handle so the results are flushed to disk before the script exits.
f.close()
print('Done, check your directory for the results')
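# Usage sketch: run the script and type the file name without its extension,
# e.g. "invoice-phish" for a file named invoice-phish.msg (illustrative name);
# the links are written to Results_invoice-phish.msg.txt in the working directory.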
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
api/client/cli.go
|
package client
import (
"errors"
"fmt"
"io"
"net/http"
"os"
"runtime"
"github.com/docker/docker/api"
cliflags "github.com/docker/docker/cli/flags"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/cliconfig/configfile"
"github.com/docker/docker/cliconfig/credentials"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/term"
"github.com/docker/engine-api/client"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
)
// DockerCli represents the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
// initializing closure
init func() error
// configFile has the client configuration file
configFile *configfile.ConfigFile
// in holds the input stream and closer (io.ReadCloser) for the client.
in io.ReadCloser
// out holds the output stream (io.Writer) for the client.
out io.Writer
// err holds the error stream (io.Writer) for the client.
err io.Writer
// keyFile holds the key file as a string.
keyFile string
// inFd holds the file descriptor of the client's STDIN (if valid).
inFd uintptr
// outFd holds file descriptor of the client's STDOUT (if valid).
outFd uintptr
// isTerminalIn indicates whether the client's STDIN is a TTY
isTerminalIn bool
// isTerminalOut indicates whether the client's STDOUT is a TTY
isTerminalOut bool
// client is the http client that performs all API operations
client client.APIClient
// state holds the terminal input state
inState *term.State
// outState holds the terminal output state
outState *term.State
}
// Initialize calls the init function that will setup the configuration for the client
// such as the TLS, tcp and other parameters used to run the client.
func (cli *DockerCli) Initialize() error {
if cli.init == nil {
return nil
}
return cli.init()
}
// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
return cli.client
}
// Out returns the writer used for stdout
func (cli *DockerCli) Out() io.Writer {
return cli.out
}
// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
return cli.err
}
// In returns the reader used for stdin
func (cli *DockerCli) In() io.ReadCloser {
return cli.in
}
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
return cli.configFile
}
// IsTerminalOut returns true if the client's stdout is a TTY
func (cli *DockerCli) IsTerminalOut() bool {
return cli.isTerminalOut
}
// OutFd returns the fd for the stdout stream
func (cli *DockerCli) OutFd() uintptr {
return cli.outFd
}
// CheckTtyInput checks if we are trying to attach to a container tty
// from a non-tty client input stream, and if so, returns an error.
func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
// In order to attach to a container tty, input stream for the client must
// be a tty itself: redirecting or piping the client standard input is
// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
if ttyMode && attachStdin && !cli.isTerminalIn {
eText := "the input device is not a TTY"
if runtime.GOOS == "windows" {
return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
}
return errors.New(eText)
}
return nil
}
// PsFormat returns the format string specified in the configuration.
// String contains columns and format specification, for example {{ID}}\t{{Name}}.
func (cli *DockerCli) PsFormat() string {
return cli.configFile.PsFormat
}
// ImagesFormat returns the format string specified in the configuration.
// String contains columns and format specification, for example {{ID}}\t{{Name}}.
func (cli *DockerCli) ImagesFormat() string {
return cli.configFile.ImagesFormat
}
func (cli *DockerCli) setRawTerminal() error {
if os.Getenv("NORAW") == "" {
if cli.isTerminalIn {
state, err := term.SetRawTerminal(cli.inFd)
if err != nil {
return err
}
cli.inState = state
}
if cli.isTerminalOut {
state, err := term.SetRawTerminalOutput(cli.outFd)
if err != nil {
return err
}
cli.outState = state
}
}
return nil
}
func (cli *DockerCli) restoreTerminal(in io.Closer) error {
if cli.inState != nil {
term.RestoreTerminal(cli.inFd, cli.inState)
}
if cli.outState != nil {
term.RestoreTerminal(cli.outFd, cli.outState)
}
// WARNING: DO NOT REMOVE THE OS CHECK !!!
// For some reason this Close call blocks on darwin..
// As the client exits right after, simply discard the close
// until we find a better solution.
if in != nil && runtime.GOOS != "darwin" {
return in.Close()
}
return nil
}
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config
// is set the client scheme will be set to https.
// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
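// A typical construction looks like this (sketch; building clientFlags is omitted):
//
//	cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, clientFlags)
//	if err := cli.Initialize(); err != nil {
//		// handle the configuration error
//	}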
func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cliflags.ClientFlags) *DockerCli {
cli := &DockerCli{
in: in,
out: out,
err: err,
keyFile: clientFlags.Common.TrustKey,
}
cli.init = func() error {
clientFlags.PostParse()
cli.configFile = LoadDefaultConfigFile(err)
client, err := NewAPIClientFromFlags(clientFlags, cli.configFile)
if err != nil {
return err
}
cli.client = client
if cli.in != nil {
cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in)
}
if cli.out != nil {
cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out)
}
return nil
}
return cli
}
// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct if none is found.
func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile {
configFile, e := cliconfig.Load(cliconfig.ConfigDir())
if e != nil {
fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e)
}
if !configFile.ContainsAuth() {
credentials.DetectDefaultStore(configFile)
}
return configFile
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(clientFlags *cliflags.ClientFlags, configFile *configfile.ConfigFile) (client.APIClient, error) {
host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions)
if err != nil {
return &client.Client{}, err
}
customHeaders := configFile.HTTPHeaders
if customHeaders == nil {
customHeaders = map[string]string{}
}
customHeaders["User-Agent"] = clientUserAgent()
verStr := api.DefaultVersion
if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
verStr = tmpStr
}
httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions)
if err != nil {
return &client.Client{}, err
}
return client.NewClient(host, verStr, httpClient, customHeaders)
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
switch len(hosts) {
case 0:
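// With no -H flags, fall back to the DOCKER_HOST environment variable,
// e.g. DOCKER_HOST=tcp://127.0.0.1:2376 (illustrative address).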
host = os.Getenv("DOCKER_HOST")
case 1:
host = hosts[0]
default:
return "", errors.New("请只指定一个 -H")
}
host, err = opts.ParseHost(tlsOptions != nil, host)
return
}
func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
if tlsOptions == nil {
// let the api client configure the default transport.
return nil, nil
}
config, err := tlsconfig.Client(*tlsOptions)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: config,
}
proto, addr, _, err := client.ParseHost(host)
if err != nil {
return nil, err
}
sockets.ConfigureTransport(tr, proto, addr)
return &http.Client{
Transport: tr,
}, nil
}
func clientUserAgent() string {
return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")"
}
|
[
"\"NORAW\"",
"\"DOCKER_API_VERSION\"",
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_API_VERSION",
"DOCKER_HOST",
"NORAW"
] |
[]
|
["DOCKER_API_VERSION", "DOCKER_HOST", "NORAW"]
|
go
| 3 | 0 | |
pyscreenshot/util.py
|
from easyprocess import EasyProcess
import os
import sys
def py2():
return sys.version_info[0] == 2
def py3():
return sys.version_info[0] == 3
def py_minor():
return sys.version_info[1]
def platform_is_osx():
return sys.platform == "darwin"
def platform_is_win():
return sys.platform == "win32"
def platform_is_linux():
return sys.platform.startswith("linux")
def use_x_display():
if platform_is_win():
return False
if platform_is_osx(): # TODO: test X on osx
return False
DISPLAY = os.environ.get("DISPLAY")
XDG_SESSION_TYPE = os.environ.get("XDG_SESSION_TYPE")
# Xwayland cannot be used for screenshots
return DISPLAY and XDG_SESSION_TYPE != "wayland"
def extract_version(txt):
"""This function tries to extract the version from the help text of any
program."""
words = txt.replace(",", " ").split()
version = None
for x in reversed(words):
if len(x) > 2:
if x[0].lower() == "v":
x = x[1:]
if "." in x and x[0].isdigit():
version = x
break
return version
def proc(name, params=None):
# Avoid a shared mutable default argument; fall back to a fresh list per call.
params = params if params is not None else []
python = sys.executable
cmd = [python, "-m", name] + params
p = EasyProcess(cmd).call()
return p
|
[] |
[] |
[
"XDG_SESSION_TYPE",
"DISPLAY"
] |
[]
|
["XDG_SESSION_TYPE", "DISPLAY"]
|
python
| 2 | 0 | |
pkg/venafi/tpp/connector_test.go
|
/*
* Copyright 2018 Venafi, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tpp
import (
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"testing"
"time"
"github.com/Venafi/vcert/v4/pkg/policy"
"github.com/Venafi/vcert/v4/pkg/util"
"github.com/Venafi/vcert/v4/pkg/certificate"
"github.com/Venafi/vcert/v4/pkg/endpoint"
"github.com/Venafi/vcert/v4/pkg/verror"
"github.com/Venafi/vcert/v4/test"
)
var ctx *test.Context
func init() {
ctx = test.GetEnvContext()
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
Renegotiation: tls.RenegotiateFreelyAsClient,
InsecureSkipVerify: true}
if ctx.TPPurl == "" {
fmt.Println("TPP URL cannot be empty. See Makefile")
os.Exit(1)
}
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
panic(err)
}
resp, err := tpp.GetRefreshToken(&endpoint.Authentication{
User: ctx.TPPuser, Password: ctx.TPPPassword,
Scope: "certificate:discover,manage,revoke;configuration:manage;ssh:manage"})
if err != nil {
panic(err)
}
ctx.TPPRefreshToken = resp.Refresh_token
ctx.TPPaccessToken = resp.Access_token
}
func getTestConnector(url string, zone string) (c *Connector, err error) {
c, err = NewConnector(url, zone, false, nil)
c.client = &http.Client{}
return c, err
}
func TestNewConnectorURLSuccess(t *testing.T) {
tests := map[string]string{
"http": "http://example.com",
"https": "https://example.com",
"host_path_only": "example.com/vedsdk/",
"trailing_vedsdk": "https://example.com/vedsdk",
"trailing_vedsdk_slash": "https://example.com/vedsdk/",
"upper_case": "HTTPS://EXAMPLE.COM/VEDSDK/",
"mixed_case": "https://EXAMPLE.com/vedsdk/",
}
for label, urlString := range tests {
t.Run(label, func(t *testing.T) {
c, err := NewConnector(urlString, "", false, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if c == nil {
t.Fatal("unexpected nil connector")
}
u, err := url.Parse(c.baseURL)
if err != nil {
t.Errorf("failed to parse baseURL: %v", err)
}
if u.Scheme != "https" {
t.Errorf("unexpected URL scheme: %v", u.Scheme)
}
if !strings.HasSuffix(u.Path, "/") {
t.Errorf("missing trailing slash: %v", u.Path)
}
if strings.HasSuffix(u.Path, "vedsdk/") {
t.Errorf("unstripped vedsdk: %v", u.Path)
}
})
}
}
func TestNewConnectorURLErrors(t *testing.T) {
tests := map[string]string{
"empty": "",
"bad_scheme": "ftp://example.com",
"schemaless": "//example.com",
"trailing_other": "https://example.com/foo/",
"nested_vedsdk": "https://example.com/foo/vedsdk",
}
for label, url := range tests {
t.Run(label, func(t *testing.T) {
c, err := NewConnector(url, "", false, nil)
if err == nil {
t.Error("expected an error")
}
if c != nil {
t.Error("expected nil connector")
}
if !errors.Is(err, verror.UserDataError) {
t.Errorf("expected a UserDataError, got: %v", err)
}
})
}
}
func TestAuthenticateAuthError(t *testing.T) {
// An attempt to Authenticate with invalid credentials results in an
// AuthError.
// TODO: Test that all Authenticate errors wrap verrors.AuthError
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, ctx.TPPurl)
}
err = tpp.Authenticate(&endpoint.Authentication{
User: "invalid-user",
Password: "invalid-password",
})
if err == nil {
t.Fatalf("expected an error")
}
if !errors.Is(err, verror.AuthError) {
t.Errorf("expected AuthError, got %v", err)
}
}
func TestPingTPP(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, ctx.TPPurl)
}
err = tpp.Ping()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
func TestBadPingTPP(t *testing.T) {
tpp, err := getTestConnector("http://bonjo-w10dev:333/vedsdk/", ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: http://bonjo-w10dev:333/vedsdk/", err)
}
err = tpp.Ping()
if err == nil {
t.Fatalf("err should not be nil, URL does not exist")
}
}
func TestGetRefreshToken(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, "")
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
refreshToken, err := tpp.GetRefreshToken(&endpoint.Authentication{
User: ctx.TPPuser, Password: ctx.TPPPassword,
Scope: "certificate:discover,manage,revoke", ClientId: "vcert-sdk"})
if err != nil {
t.Fatalf("%s", err)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: refreshToken.Access_token})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
}
func TestGetRefreshTokenWithDefaultScope(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
refreshToken, err := tpp.GetRefreshToken(&endpoint.Authentication{
User: ctx.TPPuser, Password: ctx.TPPPassword})
if err != nil {
t.Fatalf("%s", err)
}
if refreshToken.Scope != defaultScope {
t.Fatalf("Scope from refresh roken %s is not as default scope %s;", refreshToken.Scope, defaultScope)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: refreshToken.Access_token})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
}
func TestFailRefreshAccessToken(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
auth := &endpoint.Authentication{RefreshToken: "WRONGREFRESHTOKEN", ClientId: ctx.ClientID}
err = tpp.Authenticate(auth)
if err == nil {
t.Fatalf("err should not be nil, er")
}
if !strings.Contains(err.Error(), "unexpected status code on TPP Authorize. Status: 400") {
t.Fatalf("error text should contain: 'unexpected status code on TPP Authorize. Status: 400'. but it is: '%s'", err)
}
}
func TestRefreshAccessToken(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, "")
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
auth := &endpoint.Authentication{RefreshToken: ctx.TPPRefreshToken, ClientId: ctx.ClientID}
err = tpp.Authenticate(auth)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
// Update refresh token for further tests
ctx.TPPRefreshToken = auth.RefreshToken
}
func TestRefreshAccessTokenNoClientID(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
auth := &endpoint.Authentication{RefreshToken: ctx.TPPRefreshToken}
err = tpp.Authenticate(auth)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
//Update tokens for further tests
ctx.TPPRefreshToken = auth.RefreshToken
ctx.TPPaccessToken = tpp.accessToken
}
func TestAuthenticationAccessToken(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: "WRONGm3XPAT5nlWxd3iA=="})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.SetZone(ctx.TPPZone)
_, err = tpp.ReadZoneConfiguration()
if err == nil {
t.Fatalf("Auth with wrong token should fail")
}
}
func TestAuthorizeToTPP(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, ctx.TPPurl)
}
auth := &endpoint.Authentication{User: ctx.TPPuser, Password: ctx.TPPPassword}
err = tpp.Authenticate(auth)
if err != nil {
t.Fatalf("err is not nil, err: %s, %+v", err, auth)
}
}
func TestBadAuthorizeToTPP(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, ctx.TPPurl)
}
err = tpp.Authenticate(&endpoint.Authentication{User: ctx.TPPuser, Password: "wrongPassword"})
if err == nil {
t.Fatalf("err should not be nil, bad password was used")
}
}
func TestReadConfigData(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
testCases := []struct {
zone string
zoneConfig endpoint.ZoneConfiguration
}{
{getPolicyDN(ctx.TPPZone), endpoint.ZoneConfiguration{
Organization: "Venafi Inc.",
OrganizationalUnit: []string{"Integrations"},
Country: "US",
Province: "Utah",
Locality: "Salt Lake",
HashAlgorithm: x509.SHA256WithRSA,
KeyConfiguration: &endpoint.AllowedKeyConfiguration{KeySizes: []int{2048}},
CustomAttributeValues: make(map[string]string),
}},
{getPolicyDN(ctx.TPPZoneRestricted), endpoint.ZoneConfiguration{
Organization: "Venafi Inc.",
OrganizationalUnit: []string{"Integration"},
Country: "US",
Province: "Utah",
Locality: "Salt Lake",
HashAlgorithm: x509.SHA256WithRSA,
KeyConfiguration: &endpoint.AllowedKeyConfiguration{KeySizes: []int{2048}},
CustomAttributeValues: make(map[string]string),
}},
}
for _, c := range testCases {
tpp.SetZone(c.zone)
zoneConfig, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("%s", err)
}
zoneConfig.Policy = endpoint.Policy{}
if err != nil {
t.Fatalf("%s", err)
}
if !reflect.DeepEqual(*zoneConfig, c.zoneConfig) {
t.Fatalf("zone config for zone %s is not as expected \nget: %+v \nexpect: %+v", c.zone, *zoneConfig, c.zoneConfig)
}
}
tpp.SetZone("Wrong Zone")
_, err = tpp.ReadZoneConfiguration()
if err == nil {
t.Fatalf("err should be not nil for not existed zone")
}
}
func TestBadReadConfigData(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, "notexistedzone")
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
_, err = tpp.ReadZoneConfiguration()
if err == nil {
t.Fatalf("err should not be nil, invalid policy was used")
}
}
func TestRequestCertificateUserPassword(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{User: ctx.TPPuser, Password: ctx.TPPPassword})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
DoRequestCertificate(t, tpp)
}
func TestRequestCertificateToken(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
DoRequestCertificate(t, tpp)
}
func TestRequestCertificateWithValidHours(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
DoRequestCertificateWithValidHours(t, tpp)
}
func DoRequestCertificateWithValidHours(t *testing.T, tpp *Connector) {
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
cn := test.RandCN()
req := &certificate.Request{Timeout: time.Second * 30}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
u := url.URL{Scheme: "https", Host: "example.com", Path: "/test"}
req.URIs = []*url.URL{&u}
req.FriendlyName = cn
req.CustomFields = []certificate.CustomField{
{Name: "custom", Value: "2019-10-10"},
}
validHours := 144
req.ValidityHours = validHours
req.IssuerHint = "MICROSOFT"
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
t.Logf("getPolicyDN(ctx.TPPZone) = %s", getPolicyDN(ctx.TPPZone))
req.PickupID, err = tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certCollections, err := tpp.RetrieveCertificate(req)
if err != nil {
t.Fatal(err)
}
p, _ := pem.Decode([]byte(certCollections.Certificate))
cert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certValidUntil := cert.NotAfter.Format("2006-01-02")
// The certificate's NotAfter value retrieved in the previous step is in UTC,
// so convert the local date to UTC before comparing the two dates.
loc, _ := time.LoadLocation("UTC")
utcNow := time.Now().In(loc)
expectedValidDate := utcNow.AddDate(0, 0, validHours/24).Format("2006-01-02")
if expectedValidDate != certValidUntil {
t.Fatalf("Expiration date is different than expected, expected: %s, but got %s: ", expectedValidDate, certValidUntil)
}
}
func DoRequestCertificate(t *testing.T, tpp *Connector) {
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
cn := test.RandCN()
req := &certificate.Request{Timeout: time.Second * 30}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
u := url.URL{Scheme: "https", Host: "example.com", Path: "/test"}
req.URIs = []*url.URL{&u}
req.FriendlyName = cn
req.CustomFields = []certificate.CustomField{
{Name: "custom", Value: "2019-10-10"},
}
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
t.Logf("getPolicyDN(ctx.TPPZone) = %s", getPolicyDN(ctx.TPPZone))
req.PickupID, err = tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certCollections, err := tpp.RetrieveCertificate(req)
if err != nil {
t.Fatal(err)
}
p, _ := pem.Decode([]byte(certCollections.Certificate))
cert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if cert.Subject.CommonName != cn {
t.Fatalf("mismatched common names: %v and %v", cn, cert.Subject.CommonName)
}
if cert.URIs[0].String() != u.String() {
t.Fatalf("mismatched URIs: %v and %v", u.String(), cert.URIs[0].String())
}
}
func TestRequestCertificateServiceGenerated(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatal("failed to read zone configuration")
}
cn := test.RandCN()
req := &certificate.Request{}
req.Subject.CommonName = cn
req.KeyLength = 2048
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
req.FriendlyName = cn
req.CsrOrigin = certificate.ServiceGeneratedCSR
req.FetchPrivateKey = true
req.KeyPassword = "newPassw0rd!"
config.UpdateCertificateRequest(req)
pickupId, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req.PickupID = pickupId
req.ChainOption = certificate.ChainOptionIgnore
t.Log(pickupId)
var isPending = true
var pcc *certificate.PEMCollection
for isPending {
t.Logf("%s is pending...", pickupId)
time.Sleep(time.Second * 1)
pcc, err = tpp.RetrieveCertificate(req)
_, isPending = err.(endpoint.ErrCertificatePending)
}
if err != nil {
t.Fatalf("%s, request was %+v", err, req)
}
if pcc.PrivateKey == "" {
t.Fatalf("Private key was not returned by endpoint")
}
t.Logf("%+v", pcc)
}
func TestRetrieveNonIssuedCertificate(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req := &certificate.Request{}
req.Subject.CommonName = "vcert.test.vfidev.com"
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
req.FriendlyName = fmt.Sprintf("vcert integration test - %d", time.Now().Unix())
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
requestID, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req.PickupID = requestID
req.ChainOption = certificate.ChainOptionIgnore
_, err = tpp.RetrieveCertificate(req)
if err == nil {
t.Fatalf("Error should not be nil, certificate has not been issued.")
}
}
func TestRevokeCertificate(t *testing.T) {
cn := "www-1.venqa.venafi.com"
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
// req.FriendlyName = fmt.Sprintf("vcert integration test - %d", time.Now().Unix())
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certDN, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req.PickupID = certDN
req.ChainOption = certificate.ChainOptionIgnore
t.Logf("waiting for %s to be ready", certDN)
var isPending = true
for isPending {
t.Logf("%s is pending...", certDN)
time.Sleep(time.Second * 1)
_, err = tpp.RetrieveCertificate(req)
_, isPending = err.(endpoint.ErrCertificatePending)
}
	if err != nil {
		t.Fatalf("certificate has not been issued: %s", err)
	}
t.Logf("Start revocation for %s", certDN)
revReq := &certificate.RevocationRequest{CertificateDN: certDN, Disable: false}
err = tpp.RevokeCertificate(revReq)
if err != nil {
t.Fatalf("%s", err)
}
}
func TestRevokeNonIssuedCertificate(t *testing.T) {
cn := "does-not-exist.venqa.venafi.com"
certDN := fmt.Sprintf(`\VED\Policy\%s\%s`, ctx.TPPZone, cn)
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
revReq := &certificate.RevocationRequest{CertificateDN: certDN, Disable: false}
err = tpp.RevokeCertificate(revReq)
if err == nil {
t.Fatalf("It should NOT revoke certificate at %s which doesn't exist", certDN)
}
}
func TestRevokeAndDisableCertificate(t *testing.T) {
cn := test.RandCN()
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
// req.FriendlyName = fmt.Sprintf("vcert integration test - %d", time.Now().Unix())
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certDN, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
t.Logf("waiting for %s to be ready", certDN)
var isPending = true
for isPending {
t.Logf("%s is pending...", certDN)
time.Sleep(time.Second * 1)
req.PickupID = certDN
req.ChainOption = certificate.ChainOptionIgnore
_, err = tpp.RetrieveCertificate(req)
_, isPending = err.(endpoint.ErrCertificatePending)
}
	if err != nil {
		t.Fatalf("certificate has not been issued: %s", err)
	}
t.Logf("Start revocation for %s", certDN)
revReq := &certificate.RevocationRequest{CertificateDN: certDN, Disable: true}
err = tpp.RevokeCertificate(revReq)
if err != nil {
t.Fatalf("%s", err)
}
t.Logf("trying to enroll %s again after revoked with Disable=true", certDN)
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certDN, err = tpp.RequestCertificate(req)
	if err == nil {
		t.Fatalf("RequestCertificate should return an error if the DN has been revoked with Disable=true")
	}
}
func TestRenewCertificate(t *testing.T) {
cn := test.RandCN()
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
req.CsrOrigin = certificate.ServiceGeneratedCSR
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
certDN, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
oldCert := func(certDN string) *x509.Certificate {
req := &certificate.Request{}
req.PickupID = certDN
var isPending = true
var pcc *certificate.PEMCollection
for isPending {
t.Logf("%s is pending...", certDN)
time.Sleep(time.Second * 1)
pcc, err = tpp.RetrieveCertificate(req)
_, isPending = err.(endpoint.ErrCertificatePending)
}
if err != nil {
t.Fatalf("certificate has not been issued: %s", err)
}
p, _ := pem.Decode([]byte(pcc.Certificate))
oldCert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
t.Fatal(err)
}
return oldCert
}(certDN)
t.Logf("retrieved certificate, Serial is %s", oldCert.SerialNumber)
renewByCertificateDN := &certificate.RenewalRequest{CertificateDN: certDN}
reqId1, err := tpp.RenewCertificate(renewByCertificateDN)
if err != nil {
t.Fatal(err)
}
t.Logf("requested renewal for %s, will pickup by %s", certDN, reqId1)
newCert := func(certDN string) *x509.Certificate {
req := &certificate.Request{}
req.PickupID = certDN
var isPending = true
var pcc *certificate.PEMCollection
for isPending {
t.Logf("%s is pending...", certDN)
time.Sleep(time.Second * 1)
pcc, err = tpp.RetrieveCertificate(req)
_, isPending = err.(endpoint.ErrCertificatePending)
}
if err != nil {
t.Fatalf("certificate has not been issued: %s", err)
}
		p, _ := pem.Decode([]byte(pcc.Certificate))
		cert, err := x509.ParseCertificate(p.Bytes)
		if err != nil {
			t.Fatal(err)
		}
		return cert
}(reqId1)
t.Logf("retrieved certificate, Serial is %s", newCert.SerialNumber)
	// SerialNumber is a *big.Int, so compare values with Cmp rather than pointer equality.
	if newCert.SerialNumber.Cmp(oldCert.SerialNumber) == 0 {
		t.Fatal("old and new certificates' serial numbers should not be equal")
	}
}
func TestRenewCertRestoringValues(t *testing.T) {
cn := test.RandCN()
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZoneECDSA)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
req.KeyType = certificate.KeyTypeECDSA
req.KeyCurve = certificate.EllipticCurveP521
req.CsrOrigin = certificate.ServiceGeneratedCSR
req.Timeout = time.Second * 10
err = tpp.GenerateRequest(&endpoint.ZoneConfiguration{}, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
_, err = tpp.RequestCertificate(req)
if err != nil {
t.Fatal(err)
}
req.FetchPrivateKey = true
req.KeyPassword = os.Getenv("TPP_PASSWORD")
pcc, err := tpp.RetrieveCertificate(req)
if err != nil {
t.Fatal(err)
}
p, _ := pem.Decode([]byte(pcc.Certificate))
oldCert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
t.Fatal(err)
}
oldKey, ok := oldCert.PublicKey.(*ecdsa.PublicKey)
if !ok {
t.Fatal("bad key type")
}
if oldKey.Curve.Params().Name != "P-521" {
t.Fatalf("bad curve %v", oldKey.Curve.Params().Name)
}
renewReq := certificate.RenewalRequest{
CertificateDN: req.PickupID,
}
	pickupID, err := tpp.RenewCertificate(&renewReq)
	if err != nil {
		t.Fatal(err)
	}
	req = &certificate.Request{PickupID: pickupID, Timeout: 30 * time.Second}
pcc, err = tpp.RetrieveCertificate(req)
if err != nil {
t.Fatal(err)
}
p, _ = pem.Decode([]byte(pcc.Certificate))
newCert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
t.Fatal(err)
}
newKey, ok := newCert.PublicKey.(*ecdsa.PublicKey)
if !ok {
t.Fatal("bad key type")
}
if newKey.Curve.Params().Name != "P-521" {
t.Fatalf("bad curve %v", newKey.Curve.Params().Name)
}
//todo: uncomment after renew refactoring
//if string(oldKey.X.Bytes()) == string(newKey.X.Bytes()) || string(oldKey.Y.Bytes()) == string(newKey.Y.Bytes()) {
// t.Fatal("key reuse")
//}
}
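// crt and pk are a pre-generated test certificate (issued by the "VCert Test
// Mode CA", explicitly not for production use) and its matching RSA private
// key; they are used by the import and enrollment tests below.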
const crt = `-----BEGIN CERTIFICATE-----
MIIDdjCCAl6gAwIBAgIRAPqSZQ04IjWgO2rwIDRcOY8wDQYJKoZIhvcNAQENBQAw
gYAxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDARVdGFoMRcwFQYDVQQHDA5TYWx0IExh
a2UgQ2l0eTEPMA0GA1UECgwGVmVuYWZpMRswGQYDVQQLDBJOT1QgRk9SIFBST0RV
Q1RJT04xGzAZBgNVBAMMElZDZXJ0IFRlc3QgTW9kZSBDQTAeFw0xODA5MTIxMzUw
MzNaFw0xODEyMTExMzUwMzNaMCQxIjAgBgNVBAMTGWltcG9ydC52ZW5hZmkuZXhh
bXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChjQk0jSE5
ktVdH8bAM0QCpGs1rOOVMmRkMc7d4hQ6bTlFlIypMq9t+1O2Z8i4fiKDS7vSBmBo
WBgN9e0fbAnKEvBIcNLBS4lmwzRDxDCrNV3Dr5s+yJtUw9V2XBwiXbtW7qs5+c0O
y7a2S/5HudXUlAuXf7SF4MboMMpHRg+UkyA4j0peir8PtmlJjlYBt3lZdaeLlD6F
EIlIVQFZ6ulUF/kULhxhTUl2yNUUzJ/bqJlhFU6pkL+GoW1lnaZ8FYXwA1EKYyRk
DYL581eqvIBJY9tCNWbOdU1r+5wR4OOKe/WWWhcDC6nL/M8ZYhfQg1nHoD58A8Dk
H4AAt8A3EZpdAgMBAAGjRjBEMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB
/wQCMAAwHwYDVR0jBBgwFoAUzqRFDvLX0mz4AjPb45tLGavm8AcwDQYJKoZIhvcN
AQENBQADggEBABa4wqh+A63O5PHrdUCBSmQs9ve/oIXj561VBmqXkTHLrtKtbtcA
yvsMi8RD8BibBAsUCljkCmLoQD/XeQFtsPlMAxisSMYhChh58008CIYDR8Nf/qoe
YfzdMB/3VWCqTn9KGF8aMKeQvbFvuqmbtdCv//eYe6mNe2fa/x6PSdGMi4BPmjUC
PmBT4p1iwMtu8LnL4UM4awjmmExR4X4rafcyGEbf0D/CRfhDLSwxvrrVcWd6TMMY
HPZ/pw//+UrVLgEEsyM2zwf+LokbszPBvPAtHMJtr7Pnq2MQtEEkLfPqOWG3ol1H
t+4v2LIW1q4GkwOUjPqgyIaJC5jj5pH9/g8=
-----END CERTIFICATE-----`
const pk = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAoY0JNI0hOZLVXR/GwDNEAqRrNazjlTJkZDHO3eIUOm05RZSM
qTKvbftTtmfIuH4ig0u70gZgaFgYDfXtH2wJyhLwSHDSwUuJZsM0Q8QwqzVdw6+b
PsibVMPVdlwcIl27Vu6rOfnNDsu2tkv+R7nV1JQLl3+0heDG6DDKR0YPlJMgOI9K
Xoq/D7ZpSY5WAbd5WXWni5Q+hRCJSFUBWerpVBf5FC4cYU1JdsjVFMyf26iZYRVO
qZC/hqFtZZ2mfBWF8ANRCmMkZA2C+fNXqryASWPbQjVmznVNa/ucEeDjinv1lloX
Awupy/zPGWIX0INZx6A+fAPA5B+AALfANxGaXQIDAQABAoIBAE7of6WOhbsEcHkz
CzZYFBEiVEd8chEu8wBJn9ybD/xV21KUM3x1iGC1EPeYi98ppRvygwQcHzz4Qo+X
HsJpWAK+62TGzvqhNbTfBglPq+IEiA8MGE07WTu3B+3vIcLbe6UDoNkJndJrSIyU
Y9iO+dYClgLi2r9FwoIpSrQzkWqlB3edle4Nq1WABtWTOSDYysz1gk0KrLmQQfXP
CPiwkL0SjB+sfbOiVX0B2liV2oxJ5VZWNo/250wFcvrcYrgTNtEVNMXtpN0tnRMH
NPwnY+B9WGu/NVhtvOcOTPHq9xQhbmBCS1axikizCaIqEOyegdeDJ4ASJnVybfCA
KzjoCpUCgYEAwOmeEvzSP8hCKtLPU8QDBA1y+mEvZMwBY4qr3hfqv3qa0QmFvxkk
7Ubmy2oFOoUnVgnhRzAf/bajbkz4ScUgd2JrUdIEhNNVwDn/llnS/UHBlZY++BtW
mvyon9ObXgPNPoHcJqzrqARu8PPJQEsZ+xjxM/gyif3prn6Uct6R8B8CgYEA1mHd
Astwht39z16FoX9rQRGgx64Z0nesfTjl+4mkypz6ukkcfU1GjobqEG3k666+OJk1
SRs8s20Pahrh21LO5x/QtvChhZ+nIedqlhBlNH9uUJI9ChbUN0luetiSPT8F5aqg
gZMY13K5icAQ+98EcNwl7ZhVPq0BvLlbqTWi9gMCgYEAjtVqoQxob6lKtIJZ19+t
i/aZRyFmAe+6p4UpM8vpl9SjhFrUmGV5neV9ROc+79FfCqlOD3NmfGgaIbUDsTsv
irVoWLBzgBUpzKYkw6HGQpXJS4RvIyy6tw6Tm6MFylpuQPXNlyU5ZrHBos4eGGiC
2BPjo2MFqH5D41r9dv+sdmkCgYEAtSJYx3y2pe04/xYhGFP9fivzyeMrRC4DWoZR
oxcoWl0KZ41QefppzBDoAVuo2Q17AX1JjWxq/DsAlCkEffhYguXZxkhIYQuE/lt2
LjbKG/IzdfYphrXFNrVfmIIWBZOTWvqwxOpRSfBQHbhfYUCMkwMfNMHJ/LvWxOtk
K/L6rpsCgYB6p9RU2kXexAh9kUpbGqVeJBoIh6ArXHgepESE/7dPw26D0DM0mef0
X1MasxN3JF7ZsSGfcCLXnICSJHuNTy9WztqF3hUbQwYd9vmZxtzAo5/fK4DVAaXS
ZtIVl/CH/az0xqLKWIlmWOip9SfUVlZdgege+PlQtRqoFVOsH8+MEg==
-----END RSA PRIVATE KEY-----`
func TestImportCertificate(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
importReq := &certificate.ImportRequest{
// PolicyDN should be like "\\VED\\Policy\\devops\\vcert", or empty (c.zone is used then)
PolicyDN: getPolicyDN(ctx.TPPZone),
ObjectName: "import12348.venafi.example.com",
CertificateData: crt,
PrivateKeyData: pk,
Reconcile: false,
}
pp(importReq)
importResp, err := tpp.ImportCertificate(importReq)
if err != nil {
t.Fatalf("failed to import certificate: %s", err)
}
pp(importResp)
}
func TestReadPolicyConfiguration(t *testing.T) {
//todo: add more zones tests
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
cases := []struct {
zone string
policy endpoint.Policy
}{
{
ctx.TPPZone, // todo: replace with env variable
endpoint.Policy{
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]endpoint.AllowedKeyConfiguration{
{certificate.KeyTypeRSA, certificate.AllSupportedKeySizes(), nil},
{certificate.KeyTypeECDSA, nil, certificate.AllSupportedCurves()},
},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
true,
true,
},
},
{
ctx.TPPZoneRestricted,
endpoint.Policy{
[]string{`^([\p{L}\p{N}-*]+\.)*vfidev\.com$`, `^([\p{L}\p{N}-*]+\.)*vfidev\.net$`, `^([\p{L}\p{N}-*]+\.)*vfide\.org$`},
[]string{`^Venafi Inc\.$`},
[]string{"^Integration$"},
[]string{"^Utah$"},
[]string{"^Salt Lake$"},
[]string{"^US$"},
[]endpoint.AllowedKeyConfiguration{{certificate.KeyTypeRSA, []int{2048, 4096, 8192}, nil}},
[]string{`^([\p{L}\p{N}-*]+\.)*vfidev\.com$`, `^([\p{L}\p{N}-*]+\.)*vfidev\.net$`, `^([\p{L}\p{N}-*]+\.)*vfide\.org$`},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
true,
true,
},
},
{
ctx.TPPZoneECDSA,
endpoint.Policy{
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]endpoint.AllowedKeyConfiguration{
{certificate.KeyTypeECDSA, nil, []certificate.EllipticCurve{certificate.EllipticCurveP521}},
},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
[]string{".*"},
true,
true,
},
},
}
for _, c := range cases {
tpp.SetZone(c.zone)
policy, err := tpp.ReadPolicyConfiguration()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(*policy, c.policy) {
t.Fatalf("policy for zone %s is not as expected \nget: %+v \nexpect: %+v", c.zone, *policy, c.policy)
}
}
}
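// pp pretty-prints a value as indented JSON; it is only used here to dump
// request and response payloads for debugging.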
func pp(a interface{}) {
b, err := json.MarshalIndent(a, "", " ")
if err != nil {
fmt.Println("error:", err)
}
fmt.Println(string(b))
}
func Test_EnrollDoesntChange(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
cn := test.RandCN()
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
req.PrivateKey = pemRSADecode([]byte(pk))
req.FriendlyName = cn
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
t.Logf("getPolicyDN(ctx.TPPZone) = %s", getPolicyDN(ctx.TPPZone))
_, err = tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
	privKey, ok := req.PrivateKey.(*rsa.PrivateKey)
	if !ok || privKey.D.Cmp(pemRSADecode([]byte(pk)).D) != 0 {
		t.Fatal("key before and key after requesting don't match")
	}
}
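// pemRSADecode parses a PEM-encoded PKCS#1 RSA private key, panicking on
// malformed input (acceptable for a test helper).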
func pemRSADecode(priv []byte) *rsa.PrivateKey {
privPem, _ := pem.Decode(priv)
parsedKey, err := x509.ParsePKCS1PrivateKey(privPem.Bytes)
if err != nil {
panic(err)
}
return parsedKey
}
func TestNormalizeURL(t *testing.T) {
	cases := []struct {
		url       string
		expectErr bool
	}{
		{"http://localhost/vedsdk/", false},
		{"http://localhost", false},
		{"http://localhost/vedsdk", false},
		{"localhost/vedsdk", false},
		{"ftp://wrongurlformat.com", true},
	}
	for _, c := range cases {
		modifiedURL, err := normalizeURL(c.url)
		if c.expectErr {
			if err == nil {
				t.Fatalf("err was not expected to be nil. url: %s", c.url)
			}
			if strings.EqualFold(modifiedURL, expectedURL) {
				t.Fatalf("Base URL should not match expected value. Expected: %s Actual: %s", expectedURL, modifiedURL)
			}
			continue
		}
		if err != nil {
			t.Fatalf("err is not nil, err: %s url: %s", err, c.url)
		}
		if !strings.EqualFold(modifiedURL, expectedURL) {
			t.Fatalf("Base URL did not match expected value. Expected: %s Actual: %s", expectedURL, modifiedURL)
		}
	}
}
func Test_GetCertificateList(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
for _, count := range []int{10, 100, 101, 153, 200, 300} {
timeStarted := time.Now()
l, err := tpp.ListCertificates(endpoint.Filter{Limit: &count})
if err != nil {
t.Fatal(err)
}
set := make(map[string]struct{})
for _, c := range l {
set[c.Thumbprint] = struct{}{}
if c.ValidTo.Before(timeStarted) {
t.Errorf("cert %s is expired: %v", c.Thumbprint, c.ValidTo)
}
}
		if len(set) != count {
			t.Errorf("mismatched number of certificates: expected %d, got %d for zone %s", count, len(set), ctx.TPPZone)
		}
}
}
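// Test_GetCertificateListFull checks that ListCertificates honors the
// WithExpired filter: certPem below is an already-expired certificate
// (CN expired1.vfidev.com), so it should only appear in the full listing.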
func Test_GetCertificateListFull(t *testing.T) {
const certPem = `-----BEGIN CERTIFICATE-----
MIICZjCCAcegAwIBAgIIe1Dq0CjsAx8wCgYIKoZIzj0EAwQwEjEQMA4GA1UEAxMH
VGVzdCBDQTAeFw0xOTExMjAxNDU3MDBaFw0xOTExMjYxNDUwMDBaMHoxCzAJBgNV
BAYTAlVTMQ0wCwYDVQQIEwRVdGFoMRIwEAYDVQQHEwlTYWx0IExha2UxFDASBgNV
BAoTC1ZlYW5maSBJbmMuMRQwEgYDVQQLEwtJbnRlZ3JhdGlvbjEcMBoGA1UEAxMT
ZXhwaXJlZDEudmZpZGV2LmNvbTCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAEAWNR
bh7m40QpJAMV9DQMFQA6ZwIwQpBZp470b4pWt5Ih+64oLHMgwDTOkjv701hCYWK0
BdxNXYCpEGvnA3BahHprAaQHsDWxHygKJdtNeGW8ein7hN1CdMtm72aFp5DHI82U
jDWQHczRatUpOEdzjB+9JwYtI1BIFTVA8xvpRrQwEqwio1wwWjAMBgNVHRMBAf8E
AjAAMB0GA1UdDgQWBBSgTpxmCxUnyqB/xpXevPcQklFtxDALBgNVHQ8EBAMCBeAw
HgYDVR0RBBcwFYITZXhwaXJlZDEudmZpZGV2LmNvbTAKBggqhkjOPQQDBAOBjAAw
gYgCQgFrpA/sLEzrWumVicNJGLHFK2FhhMxOxOeC1Fk3HTJDiMfxHMe1QBP++wLp
vOjeQhOnqrPdQINzUCKMSuqxqFGbQAJCAZs3Be1Pz6eeKHNLzr7mYQ2/pWSjfun4
45nAry0Rb308mXI49fEprVJDQ0zyb3gM8Z8OA0wDyaQ+pcwloQkvOAM2
-----END CERTIFICATE-----
`
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZoneRestricted)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
importReq := certificate.ImportRequest{CertificateData: certPem}
_, err = tpp.ImportCertificate(&importReq)
if err != nil {
t.Fatal(err)
}
validList, err := tpp.ListCertificates(endpoint.Filter{})
if err != nil {
t.Fatal(err)
}
fullList, err := tpp.ListCertificates(endpoint.Filter{WithExpired: true})
if err != nil {
t.Fatal(err)
}
	if len(validList) >= len(fullList) {
		t.Fatalf("the number of valid certificates (%v) should be less than the total number of certificates (%v)", len(validList), len(fullList))
	}
req := certificate.Request{Subject: pkix.Name{CommonName: fmt.Sprintf("test%d%d.vfidev.com", time.Now().Unix(), time.Now().Nanosecond())}, KeyType: certificate.KeyTypeRSA, KeyLength: 2048}
err = tpp.GenerateRequest(nil, &req)
if err != nil {
t.Fatal(err)
}
req.PickupID, err = tpp.RequestCertificate(&req)
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second * 10) //todo: remove after fix bug VEN-54714
validList2, err := tpp.ListCertificates(endpoint.Filter{})
if err != nil {
t.Fatal(err)
}
fullList2, err := tpp.ListCertificates(endpoint.Filter{WithExpired: true})
if err != nil {
t.Fatal(err)
}
if len(fullList)+1 != len(fullList2) {
t.Fatal("list should be longer")
}
if len(validList)+1 != len(validList2) {
t.Fatal("list should be longer")
}
}
func TestEnrollWithLocation(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
cn := test.RandCN()
zoneConfig, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatal(err)
}
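	// Scenario: enroll the same CN repeatedly with a Location set. TPP binds the
	// certificate to a device/workload object, so re-enrolling for the same
	// instance and workload should fail unless Replace is true.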
workload := fmt.Sprintf("workload-%v", time.Now().Unix())
req := certificate.Request{}
req.Subject.CommonName = cn
req.Timeout = time.Second * 10
req.Location = &certificate.Location{
Instance: "instance",
Workload: workload,
TLSAddress: "example.com:443",
}
err = tpp.GenerateRequest(zoneConfig, &req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RequestCertificate(&req)
if err != nil {
t.Fatal(err)
}
req = certificate.Request{}
req.Subject.CommonName = cn
req.Timeout = time.Second * 10
req.Location = &certificate.Location{
Instance: "instance",
Workload: workload,
TLSAddress: "example.com:443",
}
err = tpp.GenerateRequest(zoneConfig, &req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RequestCertificate(&req)
if err == nil {
t.Fatal("Should fail with devices conflict")
}
req = certificate.Request{}
req.Subject.CommonName = cn
req.Timeout = time.Second * 10
req.Location = &certificate.Location{
Instance: "instance",
Workload: workload,
TLSAddress: "example.com:443",
Replace: true,
}
err = tpp.GenerateRequest(zoneConfig, &req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RequestCertificate(&req)
if err != nil {
t.Fatal(err)
}
//request same certificate with different workload but without replace
req.Location = &certificate.Location{
Instance: "instance",
Workload: workload + "-1",
TLSAddress: "example.com:443",
Replace: false,
}
err = tpp.GenerateRequest(zoneConfig, &req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RequestCertificate(&req)
if err != nil {
t.Fatal(err)
}
//request same certificate with same workload and without replace
req.Location = &certificate.Location{
Instance: "instance",
Workload: workload + "-1",
TLSAddress: "example.com:443",
Replace: false,
}
err = tpp.GenerateRequest(zoneConfig, &req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RequestCertificate(&req)
	if err == nil {
		t.Fatal("There should be an error if we're trying to set the same device twice in location")
	}
	expectedMessage := "vcert error: your data contains problems: instance"
	if !strings.Contains(err.Error(), expectedMessage) {
		t.Fatalf("expected the error message to contain '%s' when setting the same device twice in location, but got: %s", expectedMessage, err)
	}
//TODO: test that only instance from parameters is dissociated
//TODO: test app info with different kind of strings ???
//TODO: Check origin using config/read post request example:
//{
// "ObjectDN":"\\VED\\Policy\\devops\\vcert\\1582237636-pgqlx.venafi.example.com",
// "AttributeName":"Origin"
//}
}
func TestOmitSans(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
zone, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatal(err)
}
cn := test.RandCN()
req := certificate.Request{
Subject: pkix.Name{
CommonName: cn,
},
KeyLength: 2048,
DNSNames: []string{"www." + cn, cn},
OmitSANs: true,
CsrOrigin: certificate.ServiceGeneratedCSR,
Timeout: 30 * time.Second,
}
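	// With OmitSANs set, neither the TPP request payload nor a locally generated
	// CSR should carry the DNSNames specified above.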
tppReq, err := prepareRequest(&req, tpp.zone)
if err != nil {
t.Fatal(err)
}
	if len(tppReq.SubjectAltNames) > 0 {
		t.Fatal("the TPP request should contain no SANs")
	}
req = certificate.Request{
Subject: pkix.Name{
CommonName: cn,
},
KeyLength: 2048,
DNSNames: []string{"www." + cn, cn},
OmitSANs: true,
CsrOrigin: certificate.LocalGeneratedCSR,
Timeout: 30 * time.Second,
}
err = tpp.GenerateRequest(zone, &req)
if err != nil {
t.Fatal(err)
}
b, _ := pem.Decode(req.GetCSR())
csr, err := x509.ParseCertificateRequest(b.Bytes)
if err != nil {
t.Fatal(err)
}
	if len(csr.DNSNames) > 0 {
		t.Fatal("the CSR should contain no DNS SANs")
	}
_, err = tpp.RequestCertificate(&req)
if err != nil {
t.Fatal(err)
}
_, err = tpp.RetrieveCertificate(&req)
if err != nil {
t.Fatal(err)
}
}
func TestSetPolicy(t *testing.T) {
policyName := os.Getenv("TPP_PM_ROOT") + "\\" + test.RandTppPolicyName()
ctx.CloudZone = policyName
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
ps := test.GetTppPolicySpecification()
_, err = tpp.SetPolicy(policyName, ps)
if err != nil {
t.Fatalf("%s", err)
}
}
func TestGetPolicy(t *testing.T) {
	t.Skip("this test is only used during development")
policyName := os.Getenv("TPP_POLICY_MANAGEMENT_SAMPLE")
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
specifiedPS := test.GetTppPolicySpecification()
ps, err := tpp.GetPolicy(policyName)
if err != nil {
t.Fatalf("%s", err)
}
//validate each attribute
//validate subject attributes
if ps == nil {
t.Fatalf("specified Policy wasn't found")
}
if ps.Policy.Domains != nil && specifiedPS.Policy.Domains != nil {
valid := test.IsArrayStringEqual(specifiedPS.Policy.Domains, ps.Policy.Domains)
if !valid {
t.Fatalf("specified domains are different")
}
}
//validate cert authority id.
if specifiedPS.Policy.CertificateAuthority != nil && *(specifiedPS.Policy.CertificateAuthority) != "" {
if ps.Policy.CertificateAuthority == nil || *(ps.Policy.CertificateAuthority) == "" {
t.Fatalf("venafi policy doesn't have a certificate authority")
}
if *(ps.Policy.CertificateAuthority) != *(specifiedPS.Policy.CertificateAuthority) {
t.Fatalf("certificate authority value doesn't match, get: %s but expected: %s", *(ps.Policy.CertificateAuthority), *(specifiedPS.Policy.CertificateAuthority))
}
}
if specifiedPS.Policy.Subject.Orgs != nil {
if ps.Policy.Subject.Orgs == nil {
t.Fatalf("specified policy orgs are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.Subject.Orgs, ps.Policy.Subject.Orgs)
if !valid {
t.Fatalf("specified policy orgs are different")
}
}
if specifiedPS.Policy.Subject.OrgUnits != nil {
if ps.Policy.Subject.OrgUnits == nil {
t.Fatalf("specified policy orgs units are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.Subject.OrgUnits, ps.Policy.Subject.OrgUnits)
if !valid {
t.Fatalf("specified policy orgs units are different")
}
}
if specifiedPS.Policy.Subject.Localities != nil {
if ps.Policy.Subject.Localities == nil {
t.Fatalf("specified policy localities are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.Subject.Localities, ps.Policy.Subject.Localities)
if !valid {
t.Fatalf("specified policy localities are different")
}
}
if specifiedPS.Policy.Subject.States != nil {
if ps.Policy.Subject.States == nil {
t.Fatalf("specified policy states are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.Subject.States, ps.Policy.Subject.States)
if !valid {
t.Fatalf("specified policy states are different")
}
}
if specifiedPS.Policy.Subject.Countries != nil {
if ps.Policy.Subject.Countries == nil {
t.Fatalf("specified policy countries are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.Subject.Countries, ps.Policy.Subject.Countries)
if !valid {
t.Fatalf("specified policy countries are different")
}
}
//validate key pair values.
if specifiedPS.Policy.KeyPair.KeyTypes != nil {
if ps.Policy.KeyPair.KeyTypes == nil {
t.Fatalf("specified policy key types are not specified")
}
valid := test.IsArrayStringEqual(specifiedPS.Policy.KeyPair.KeyTypes, ps.Policy.KeyPair.KeyTypes)
if !valid {
t.Fatalf("specified policy key types are different")
}
}
if specifiedPS.Policy.KeyPair.RsaKeySizes != nil {
if ps.Policy.KeyPair.RsaKeySizes == nil {
t.Fatalf("specified policy rsa key sizes are not specified")
}
valid := test.IsArrayIntEqual(specifiedPS.Policy.KeyPair.RsaKeySizes, ps.Policy.KeyPair.RsaKeySizes)
if !valid {
t.Fatalf("specified policy rsa key sizes are different")
}
}
	if specifiedPS.Policy.KeyPair.ReuseAllowed != nil {
		if ps.Policy.KeyPair.ReuseAllowed == nil {
			t.Fatalf("specified policy reuseAllowed is not specified")
		}
		if *(ps.Policy.KeyPair.ReuseAllowed) != *(specifiedPS.Policy.KeyPair.ReuseAllowed) {
			t.Fatalf("specified policy reuseAllowed values are different")
		}
	}
}
func TestSetEmptyPolicy(t *testing.T) {
policyName := os.Getenv("TPP_PM_ROOT") + "\\" + test.RandTppPolicyName()
ctx.CloudZone = policyName
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	specification := policy.PolicySpecification{}
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
_, err = tpp.SetPolicy(policyName, &specification)
if err != nil {
t.Fatalf("%s", err)
}
}
func TestSetDefaultPolicyValuesAndValidate(t *testing.T) {
specification := test.GetTppPolicySpecification()
specification.Policy = nil
ec := "P384"
serGenerated := true
specification.Default.KeyPair.EllipticCurve = &ec
specification.Default.KeyPair.ServiceGenerated = &serGenerated
policyName := os.Getenv("TPP_PM_ROOT") + "\\" + test.RandTppPolicyName()
ctx.CloudZone = policyName
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
_, err = tpp.SetPolicy(policyName, specification)
if err != nil {
t.Fatalf("%s", err)
}
//get the created policy
ps, err := tpp.GetPolicy(policyName)
if err != nil {
t.Fatalf("%s", err)
}
if ps.Default == nil {
t.Fatalf("policy's defaults are nil")
}
localDefault := specification.Default
remoteDefault := ps.Default
	if *(localDefault.AutoInstalled) != *(remoteDefault.AutoInstalled) {
		t.Fatalf("policy's default autoInstalled value is different")
	}
if remoteDefault.Subject == nil {
t.Fatalf("policy's default subject is nil")
}
if *(remoteDefault.Subject.Locality) != *(localDefault.Subject.Locality) {
t.Fatalf("policy's default locality is different expected: %s but get %s", *(localDefault.Subject.Locality), *(remoteDefault.Subject.Locality))
}
if *(remoteDefault.Subject.Country) != *(localDefault.Subject.Country) {
t.Fatalf("policy's default country is different expected: %s but get %s", *(localDefault.Subject.Country), *(remoteDefault.Subject.Country))
}
if *(remoteDefault.Subject.State) != *(localDefault.Subject.State) {
t.Fatalf("policy's default state is different expected: %s but get %s", *(localDefault.Subject.State), *(remoteDefault.Subject.State))
}
if *(remoteDefault.Subject.Org) != *(localDefault.Subject.Org) {
t.Fatalf("policy's default org is different expected: %s but get %s", *(localDefault.Subject.Org), *(remoteDefault.Subject.Org))
}
valid := test.IsArrayStringEqual(remoteDefault.Subject.OrgUnits, localDefault.Subject.OrgUnits)
if !valid {
t.Fatalf("policy's default orgUnits are different")
}
if remoteDefault.KeyPair == nil {
t.Fatalf("policy's default keyPair is nil")
}
if *(remoteDefault.KeyPair.KeyType) != *(localDefault.KeyPair.KeyType) {
t.Fatalf("policy's default keyType is different expected: %s but get %s", *(localDefault.KeyPair.KeyType), *(remoteDefault.KeyPair.KeyType))
}
/*if *(remoteDefault.KeyPair.EllipticCurve) != *(localDefault.KeyPair.EllipticCurve) {
t.Fatalf("policy's default ellipticCurve is different expected: %s but get %s", *(localDefault.KeyPair.KeyType), * (remoteDefault.KeyPair.KeyType))
}*/
if *(remoteDefault.KeyPair.ServiceGenerated) != *(localDefault.KeyPair.ServiceGenerated) {
t.Fatalf("policy's default serviceGenerated is different expected: %s but get %s", strconv.FormatBool(*(localDefault.KeyPair.ServiceGenerated)), strconv.FormatBool(*(remoteDefault.KeyPair.ServiceGenerated)))
}
if *(remoteDefault.KeyPair.RsaKeySize) != *(localDefault.KeyPair.RsaKeySize) {
t.Fatalf("policy's default RsaKeySize is different expected: %s but get %s", strconv.Itoa(*(localDefault.KeyPair.RsaKeySize)), strconv.Itoa(*(remoteDefault.KeyPair.RsaKeySize)))
}
}
func TestSetPolicyValuesAndValidate(t *testing.T) {
specification := test.GetTppPolicySpecification()
specification.Default = nil
policyName := os.Getenv("TPP_PM_ROOT") + "\\" + test.RandTppPolicyName()
ctx.CloudZone = policyName
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
_, err = tpp.SetPolicy(policyName, specification)
if err != nil {
t.Fatalf("%s", err)
}
//get the created policy
ps, err := tpp.GetPolicy(policyName)
if err != nil {
t.Fatalf("%s", err)
}
if ps.Policy == nil {
t.Fatalf("policy is nil")
}
localPolicy := specification.Policy
remotePolicy := ps.Policy
	if *(localPolicy.AutoInstalled) != *(remotePolicy.AutoInstalled) {
		t.Fatalf("policy's autoInstalled value is different")
	}
if remotePolicy.Subject == nil {
t.Fatalf("policy's subject is nil")
}
valid := test.IsArrayStringEqual(remotePolicy.Subject.Localities, localPolicy.Subject.Localities)
if !valid {
t.Fatalf("policy's localities are different expected: %+q but get %+q ", localPolicy.Subject.Localities, remotePolicy.Subject.Localities)
}
valid = test.IsArrayStringEqual(remotePolicy.Subject.Countries, localPolicy.Subject.Countries)
if !valid {
t.Fatalf("policy's countries are different expected: %+q but get %+q", localPolicy.Subject.Countries, remotePolicy.Subject.Countries)
}
valid = test.IsArrayStringEqual(remotePolicy.Subject.States, localPolicy.Subject.States)
if !valid {
t.Fatalf("policy's states are different expected: %+q but get %+q", localPolicy.Subject.States, remotePolicy.Subject.States)
}
valid = test.IsArrayStringEqual(remotePolicy.Subject.Orgs, localPolicy.Subject.Orgs)
if !valid {
t.Fatalf("policy's org are different expected: %+q but get %+q", localPolicy.Subject.Orgs, remotePolicy.Subject.Orgs)
}
valid = test.IsArrayStringEqual(remotePolicy.Subject.OrgUnits, localPolicy.Subject.OrgUnits)
if !valid {
t.Fatalf("policy's org units are different expected: %+q but get %+q", localPolicy.Subject.OrgUnits, remotePolicy.Subject.OrgUnits)
}
if remotePolicy.KeyPair == nil {
t.Fatalf("policy's keyPair is nil")
}
valid = test.IsArrayStringEqual(remotePolicy.KeyPair.KeyTypes, localPolicy.KeyPair.KeyTypes)
if !valid {
t.Fatalf("policy's keyTypes are different expected: %+q but get %+q", localPolicy.KeyPair.KeyTypes, remotePolicy.KeyPair.KeyTypes)
}
if *(remotePolicy.KeyPair.ServiceGenerated) != *(localPolicy.KeyPair.ServiceGenerated) {
t.Fatalf("policy's serviceGenerated is different expected: %s but get %s", strconv.FormatBool(*(localPolicy.KeyPair.ServiceGenerated)), strconv.FormatBool(*(remotePolicy.KeyPair.ServiceGenerated)))
}
valid = test.IsArrayIntEqual(remotePolicy.KeyPair.RsaKeySizes, localPolicy.KeyPair.RsaKeySizes)
if !valid {
t.Fatalf("policy's RsaKeySizes are different expected: %+q but get %+q", localPolicy.KeyPair.RsaKeySizes, remotePolicy.KeyPair.RsaKeySizes)
}
}
func TestCreateSshCertServiceGeneratedKP(t *testing.T) {
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	duration := 4
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
var req = &certificate.SshCertRequest{}
req.KeyId = test.RandSshKeyId()
req.ValidityPeriod = fmt.Sprint(duration, "h")
req.Template = os.Getenv("TPP_SSH_CA")
req.SourceAddresses = []string{"test.com"}
req.Timeout = time.Second * 10
respData, err := tpp.RequestSSHCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
retReq := &certificate.SshCertRequest{
PickupID: respData.DN,
IncludeCertificateDetails: true,
Timeout: time.Duration(10) * time.Second,
}
resp, err := tpp.RetrieveSSHCertificate(retReq)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if resp.PrivateKeyData == "" {
t.Error("Private key data is empty")
}
if resp.PublicKeyData == "" {
t.Error("Public key data is empty")
}
if resp.CertificateData == "" {
t.Error("Certificate key data is empty")
}
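	// CertificateDetails reports ValidFrom/ValidTo as Unix seconds (hence
	// ConvertSecondsToTime); their difference should match the requested
	// validity period.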
validFrom := util.ConvertSecondsToTime(resp.CertificateDetails.ValidFrom)
validTo := util.ConvertSecondsToTime(resp.CertificateDetails.ValidTo)
durationFromCert := validTo.Sub(validFrom)
hours := durationFromCert.Hours()
intHours := int(hours)
	if intHours != duration {
		t.Errorf("certificate duration is different, expected: %v but got %v", duration, intHours)
	}
}
func TestCreateSshCertLocalGeneratedKP(t *testing.T) {
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	duration := 4
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
var req = &certificate.SshCertRequest{}
req.KeyId = test.RandSshKeyId()
priv, pub, err := util.GenerateSshKeyPair(3072, "", req.KeyId)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if priv == nil {
t.Fatalf("generated private key is nil")
}
if pub == nil {
t.Fatalf("generated public key is nil")
}
req.ValidityPeriod = fmt.Sprint(duration, "h")
req.Template = os.Getenv("TPP_SSH_CA")
req.SourceAddresses = []string{"test.com"}
req.Timeout = time.Second * 10
sPubKey := string(pub)
req.PublicKeyData = sPubKey
respData, err := tpp.RequestSSHCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
retReq := &certificate.SshCertRequest{
PickupID: respData.DN,
IncludeCertificateDetails: true,
Timeout: time.Duration(10) * time.Second,
}
resp, err := tpp.RetrieveSSHCertificate(retReq)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if resp.PrivateKeyData != "" {
t.Error("Private key data is not empty")
}
if resp.PublicKeyData == "" {
t.Error("Public key data is empty")
}
if resp.PublicKeyData != req.PublicKeyData {
t.Error("expected public key data is different")
}
if resp.CertificateData == "" {
t.Error("Certificate key data is empty")
}
validFrom := util.ConvertSecondsToTime(resp.CertificateDetails.ValidFrom)
validTo := util.ConvertSecondsToTime(resp.CertificateDetails.ValidTo)
durationFromCert := validTo.Sub(validFrom)
hours := durationFromCert.Hours()
intHours := int(hours)
if intHours != duration {
t.Errorf("certificate duration is different, expected: %v but got %v", duration, intHours)
}
}
func TestCreateSshCertProvidedPubKey(t *testing.T) {
t.Skip("skipping this test since a fresh generated ssh public key is required")
var fileContent []byte
absPath, err := filepath.Abs("../../../test-files/open-source-ssh-cert-test.pub")
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
fileContent, err = ioutil.ReadFile(absPath)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
content := string(fileContent)
if content == "" {
t.Fatal("public key is empty")
}
	tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
	if err != nil {
		t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
	}
	duration := 4
	tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
var req = &certificate.SshCertRequest{}
req.KeyId = test.RandSshKeyId()
req.ValidityPeriod = fmt.Sprint(duration, "h")
req.Template = os.Getenv("TPP_SSH_CA")
req.PublicKeyData = content
req.SourceAddresses = []string{"test.com"}
respData, err := tpp.RequestSSHCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
retReq := &certificate.SshCertRequest{
PickupID: respData.DN,
IncludeCertificateDetails: true,
Timeout: time.Duration(10) * time.Second,
}
resp, err := tpp.RetrieveSSHCertificate(retReq)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if resp.CertificateData == "" {
t.Error("Certificate key data is empty")
}
validFrom := util.ConvertSecondsToTime(resp.CertificateDetails.ValidFrom)
validTo := util.ConvertSecondsToTime(resp.CertificateDetails.ValidTo)
durationFromCert := validTo.Sub(validFrom)
hours := durationFromCert.Hours()
intHours := int(hours)
	if intHours != duration {
		t.Errorf("certificate duration is different, expected: %v but got %v", duration, intHours)
	}
}
func TestSshGetConfig(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
tpp.verbose = true
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
var req = &certificate.SshCaTemplateRequest{}
req.Template = os.Getenv("TPP_SSH_CA")
data, err := tpp.RetrieveSshConfig(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if data.CaPublicKey == "" {
t.Fatalf("CA public key is empty")
}
	if len(data.Principals) == 0 {
		t.Fatalf("principals are empty")
	}
}
func TestGetCertificateMetaData(t *testing.T) {
tpp, err := getTestConnector(ctx.TPPurl, ctx.TPPZone)
if err != nil {
t.Fatalf("err is not nil, err: %s url: %s", err, expectedURL)
}
if tpp.apiKey == "" {
err = tpp.Authenticate(&endpoint.Authentication{AccessToken: ctx.TPPaccessToken})
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
}
config, err := tpp.ReadZoneConfiguration()
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
cn := test.RandCN()
req := &certificate.Request{}
req.Subject.CommonName = cn
req.Subject.Organization = []string{"Venafi, Inc."}
req.Subject.OrganizationalUnit = []string{"Automated Tests"}
req.Subject.Locality = []string{"Las Vegas"}
req.Subject.Province = []string{"Nevada"}
req.Subject.Country = []string{"US"}
err = tpp.GenerateRequest(config, req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
t.Logf("getPolicyDN(ctx.TPPZone) = %s", getPolicyDN(ctx.TPPZone))
dn, err := tpp.RequestCertificate(req)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
metaData, err := tpp.RetrieveCertificateMetaData(dn)
if err != nil {
t.Fatalf("err is not nil, err: %s", err)
}
if metaData == nil {
t.Fatal("meta data is nil")
}
}
|
[
"\"TPP_PASSWORD\"",
"\"TPP_PM_ROOT\"",
"\"TPP_POLICY_MANAGEMENT_SAMPLE\"",
"\"TPP_PM_ROOT\"",
"\"TPP_PM_ROOT\"",
"\"TPP_PM_ROOT\"",
"\"TPP_SSH_CA\"",
"\"TPP_SSH_CA\"",
"\"TPP_SSH_CA\"",
"\"TPP_SSH_CA\""
] |
[] |
[
"TPP_PASSWORD",
"TPP_POLICY_MANAGEMENT_SAMPLE",
"TPP_PM_ROOT",
"TPP_SSH_CA"
] |
[]
|
["TPP_PASSWORD", "TPP_POLICY_MANAGEMENT_SAMPLE", "TPP_PM_ROOT", "TPP_SSH_CA"]
|
go
| 4 | 0 | |
scripts/slack/notify_success_operator_push.py
|
#!/usr/bin/python3
import sys
import os
import requests
import json
def notifySlack(operator_version, upstream_community, pr_url):
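    """Post a build notification to a Slack incoming webhook.

    Reads the webhook URL from SLACK_WEBHOOK and the build link from
    CIRCLE_BUILD_URL; the three arguments are passed in from the CI pipeline
    (see the __main__ block below).
    """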
circle_build_url = os.getenv('CIRCLE_BUILD_URL')
url = os.getenv('SLACK_WEBHOOK')
data = {
'attachments':
[
{
'color': '#7CD197',
'fallback': 'Build Notification: ' + circle_build_url,
'title': 'A new Snyk Operator has been pushed to ' + upstream_community,
                'text': 'A PR has been opened for branch *snyk/' + upstream_community + '/snyk-operator-v' + operator_version + '* on the ' + upstream_community + ' GitHub repo.\n' + pr_url
}
]
}
requests.post(url, data=json.dumps(data))
if __name__ == '__main__':
operator_version = sys.argv[1]
upstream_community = sys.argv[2]
pr_url = sys.argv[3]
notifySlack(operator_version, upstream_community, pr_url)
|
[] |
[] |
[
"SLACK_WEBHOOK",
"CIRCLE_BUILD_URL"
] |
[]
|
["SLACK_WEBHOOK", "CIRCLE_BUILD_URL"]
|
python
| 2 | 0 | |
python/mxnet/contrib/quantization.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Quantization module for generating quantized (INT8) models from FP32 models."""
try:
from scipy import stats
except ImportError:
stats = None
import ctypes
import logging
import os
import shutil
import warnings
import numpy as np
from ..base import _LIB, check_call, py_str
from ..base import c_array, c_str, mx_uint, c_str_array
from ..base import NDArrayHandle, SymbolHandle
from ..symbol import Symbol
from ..symbol import load as sym_load
from .. import ndarray
from ..ndarray import load as nd_load
from ..ndarray import save as nd_save
from ..ndarray import NDArray
from ..io import DataIter, DataDesc, DataBatch
from ..context import cpu, Context
from ..module import Module
def _quantize_params(qsym, params, th_dict):
"""Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
with names of `weight` or `bias`, not aux_params. If `qsym` contains symbols
that are excluded from being quantized, their corresponding params will
not be quantized, but saved together with quantized params of the symbols that
have been quantized.
Parameters
----------
qsym : Symbol
Quantized symbol from FP32 symbol.
    params : dict of str->NDArray
        FP32 params that have not been quantized.
    th_dict : dict of str->(float, float)
        Min/max pairs of layers' outputs, used as quantization thresholds.
"""
inputs_name = qsym.list_arguments()
quantized_params = {}
for name in inputs_name:
if name.endswith(('weight_quantize', 'bias_quantize')):
original_name = name[:-len('_quantize')]
param = params[original_name]
# pylint: disable=unbalanced-tuple-unpacking
val, vmin, vmax = ndarray.contrib.quantize(data=param,
min_range=ndarray.min(param),
max_range=ndarray.max(param),
out_type='int8')
quantized_params[name] = val
quantized_params[name+'_min'] = vmin
quantized_params[name+'_max'] = vmax
elif name in params:
quantized_params[name] = params[name]
        elif name.endswith('_min'):
            output = name[:-len('_min')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][0]])
        elif name.endswith('_max'):
            output = name[:-len('_max')]
            if output in th_dict:
                quantized_params[name] = ndarray.array([th_dict[output][1]])
return quantized_params
def _quantize_symbol(sym, ctx, excluded_symbols=None, excluded_operators=None,
offline_params=None, quantized_dtype='int8', quantize_mode='smart',
quantize_granularity='tensor-wise'):
"""Given a symbol object representing a neural network of data type FP32,
quantize it into a INT8 network.
Parameters
----------
sym : Symbol
FP32 neural network symbol.
ctx : Context
Defines the device that users want to run quantized symbol.
    excluded_symbols : list of strings
        A list of strings representing the names of the symbols that users want to exclude
        from being quantized.
    excluded_operators : list of strings
        A list of strings representing the names of the operators that users want to exclude
        from being quantized.
offline_params : list of strs
Names of the parameters that users want to quantize offline. It's always recommended to
quantize parameters offline so that quantizing parameters during the inference can be
avoided.
quantized_dtype: str
The quantized destination type for input data.
quantize_mode: str
The mode that quantization pass to apply.
quantize_granularity: str
The granularity of quantization, currently supports 'tensor-wise' and 'channel-wise'
quantization. The default value is 'tensor-wise'.
"""
num_excluded_symbols = 0
if excluded_symbols is not None:
assert isinstance(excluded_symbols, list)
num_excluded_symbols = len(excluded_symbols)
else:
excluded_symbols = []
num_excluded_ops = 0
if excluded_operators is not None:
assert isinstance(excluded_operators, list)
num_excluded_ops = len(excluded_operators)
else:
excluded_operators = []
num_offline = 0
offline = []
if offline_params is not None:
num_offline = len(offline_params)
for k in offline_params:
offline.append(c_str(k))
out = SymbolHandle()
size = mx_uint()
calib_str = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXQuantizeSymbol(sym.handle,
ctypes.byref(out),
ctypes.byref(ctypes.c_int(ctx.device_typeid)),
mx_uint(num_excluded_symbols),
c_str_array(excluded_symbols),
mx_uint(num_excluded_ops),
c_str_array(excluded_operators),
mx_uint(num_offline),
c_array(ctypes.c_char_p, offline),
c_str(quantized_dtype),
ctypes.c_bool(True),
c_str(quantize_mode),
c_str(quantize_granularity),
ctypes.byref(size),
ctypes.byref(calib_str)))
    calib_layer = [py_str(calib_str[i]) for i in range(size.value)]
return Symbol(out), calib_layer
def combine_histogram(old_hist, arr, new_min, new_max, new_th):
""" Collect layer histogram for arr and combine it with old histogram.
"""
(old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist
if new_th <= old_th:
hist, _ = np.histogram(arr, bins=len(old_hist), range=(-old_th, old_th))
return (old_hist + hist, old_hist_edges, min(old_min, new_min), max(old_max, new_max), old_th)
else:
# Need to generate new histogram with new_th
old_num_bins = len(old_hist)
old_step = 2 * old_th / old_num_bins
half_increased_bins = int((new_th - old_th) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
new_th = half_increased_bins * old_step + old_th
hist, hist_edges = np.histogram(arr, bins=new_num_bins, range=(-new_th, new_th))
hist[half_increased_bins:new_num_bins - half_increased_bins] += old_hist
return (hist, hist_edges, min(old_min, new_min), max(old_max, new_max), new_th)
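# Illustrative usage sketch (assumed data, not part of the module's API):
#   hist, edges = np.histogram(batch1, bins=8001, range=(-1.0, 1.0))
#   merged = combine_histogram((hist, edges, batch1.min(), batch1.max(), 1.0),
#                              batch2, batch2.min(), batch2.max(), 2.5)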
class _LayerHistogramCollector(object):
"""Saves layer histogram in a dict with layer names as keys and lists of NDArrays as
values. The collected histogram will be used for calculating the optimal thresholds for
quantization using KL divergence.
"""
def __init__(self, num_bins=8001, include_layer=None, logger=None):
self.hist_dict = {}
self.num_bins = num_bins
self.include_layer = include_layer
self.logger = logger
def collect(self, name, arr):
"""Callback function for collecting layer output NDArrays."""
name = py_str(name)
if name not in self.include_layer:
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False).copyto(cpu()).asnumpy()
if self.logger:
self.logger.debug("Collecting layer %s histogram of shape %s" % (name, arr.shape))
min_range = np.min(arr)
max_range = np.max(arr)
th = max(abs(min_range), abs(max_range))
if name in self.hist_dict:
self.hist_dict[name] = combine_histogram(self.hist_dict[name], arr, min_range, max_range, th)
else:
hist, hist_edges = np.histogram(arr, bins=self.num_bins, range=(-th, th))
self.hist_dict[name] = (hist, hist_edges, min_range, max_range, th)
class _LayerOutputMinMaxCollector(object):
"""Saves layer output min and max values in a dict with layer names as keys.
The collected min and max values will be directly used as thresholds for quantization.
"""
def __init__(self, quantized_dtype, include_layer=None, logger=None):
self.min_max_dict = {}
self.quantized_dtype = quantized_dtype
self.include_layer = include_layer
self.logger = logger
def collect(self, name, arr):
"""Callback function for collecting min and max values from an NDArray."""
name = py_str(name)
if name not in self.include_layer:
return
handle = ctypes.cast(arr, NDArrayHandle)
arr = NDArray(handle, writable=False)
min_range = ndarray.min(arr).asscalar()
max_range = ndarray.max(arr).asscalar()
if name in self.min_max_dict:
cur_min_max = self.min_max_dict[name]
self.min_max_dict[name] = (min(cur_min_max[0], min_range),
max(cur_min_max[1], max_range))
else:
self.min_max_dict[name] = (min_range, max_range)
if self.logger:
self.logger.debug("Collecting layer %s min_range=%f, max_range=%f"
% (name, min_range, max_range))
def _calibrate_quantized_sym(qsym, th_dict):
"""Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
"""
if th_dict is None or len(th_dict) == 0:
return qsym
num_layer_outputs = len(th_dict)
layer_output_names = []
min_vals = []
max_vals = []
for k, v in th_dict.items():
layer_output_names.append(k)
min_vals.append(v[0])
max_vals.append(v[1])
calibrated_sym = SymbolHandle()
check_call(_LIB.MXSetCalibTableToQuantizedSymbol(qsym.handle,
mx_uint(num_layer_outputs),
c_str_array(layer_output_names),
c_array(ctypes.c_float, min_vals),
c_array(ctypes.c_float, max_vals),
ctypes.byref(calibrated_sym)))
return Symbol(calibrated_sym)
def _collect_layer_statistics(mod, data, collector, max_num_examples=None, logger=None):
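    """Run forward passes over data with a monitor callback attached, so the
    collector sees every layer's output; stops early once max_num_examples is
    reached (if set). Returns the number of examples processed.
    """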
if not isinstance(data, DataIter):
        raise ValueError('Only supports data of type DataIter; received type %s'
                         % str(type(data)))
mod._exec_group.execs[0].set_monitor_callback(collector.collect, monitor_all=True)
num_batches = 0
num_examples = 0
for batch in data:
mod.forward(data_batch=batch, is_train=False)
num_batches += 1
num_examples += data.batch_size
if max_num_examples is not None and num_examples >= max_num_examples:
break
if logger is not None:
logger.info("Collected statistics from %d batches with batch_size=%d"
% (num_batches, data.batch_size))
return num_examples
def _collect_layer_output_min_max(mod, data, quantized_dtype, include_layer=None,
max_num_examples=None, logger=None):
"""Collect min and max values from layer outputs and save them in
a dictionary mapped by layer names.
"""
collector = _LayerOutputMinMaxCollector(quantized_dtype=quantized_dtype,
include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.min_max_dict, num_examples
def _collect_layer_histogram(mod, data, include_layer=None,
max_num_examples=None, logger=None):
"""Collect layer outputs and save them in a dictionary mapped by layer names."""
collector = _LayerHistogramCollector(include_layer=include_layer, logger=logger)
num_examples = _collect_layer_statistics(mod, data, collector, max_num_examples, logger)
return collector.hist_dict, num_examples
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
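# A minimal sketch (not part of the original module) demonstrating that
# smoothing removes zero entries while preserving total mass: the amount
# added to the zero bins (eps * n_zeros) equals the amount taken off the
# non-zero bins (eps1 * n_nonzeros), which keeps the KL divergence finite.
def _example_smooth_distribution():
    p = np.array([0, 2, 0, 6], dtype=np.float32)
    q = _smooth_distribution(p, eps=0.0001)
    assert (q > 0).all()                  # no zeros remain
    assert abs(q.sum() - p.sum()) < 1e-5  # total mass preserved
    return q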
# pylint: disable=line-too-long
def _get_optimal_threshold(hist_data, quantized_dtype, num_quantized_bins=255):
"""Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
(hist, hist_edges, min_val, max_val, _) = hist_data
num_bins = len(hist)
assert (num_bins % 2 == 1)
if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# We need to move negative bins to positive bins to fit uint8 range.
num_quantized_bins = num_quantized_bins * 2 + 1
hist = ndarray.array(hist, ctx=cpu())
hist_edges = ndarray.array(hist_edges, ctx=cpu())
threshold, divergence = ndarray.contrib.calibrate_entropy(hist=hist,
hist_edges=hist_edges,
num_quantized_bins=num_quantized_bins)
threshold = threshold.asnumpy()
divergence = divergence.asnumpy()
return min_val, max_val, threshold, divergence
# pylint: enable=line-too-long
def _get_optimal_thresholds(hist_dict, quantized_dtype, num_quantized_bins=255, logger=None):
"""Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
if stats is None:
raise ImportError('scipy.stats is required for running entropy mode of calculating'
' the optimal thresholds for quantizing FP32 ndarrays into int8.'
' Please check if the scipy python bindings are installed.')
assert isinstance(hist_dict, dict)
if logger is not None:
logger.info('Calculating optimal thresholds for quantization using KL divergence'
' with num_quantized_bins=%d' % num_quantized_bins)
th_dict = {}
# copy hist_dict keys since the keys() only returns a view in python3
layer_names = list(hist_dict.keys())
for name in layer_names:
assert name in hist_dict
min_val, max_val, th, divergence = \
_get_optimal_threshold(hist_dict[name], quantized_dtype,
num_quantized_bins=num_quantized_bins)
if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
th_dict[name] = (0, th)
else:
th_dict[name] = (-th, th)
del hist_dict[name] # release the memory
if logger:
logger.debug('layer=%s, min_val=%f, max_val=%f, th=%f, divergence=%f'
% (name, min_val, max_val, th, divergence))
return th_dict
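# Shape of the returned th_dict (layer names and values below are hypothetical):
# each calibrated layer output maps to its (min, max) quantization thresholds,
# symmetric for int8 and one-sided for non-negative uint8/auto outputs, e.g.
#   {'conv0_output': (-2.31, 2.31),
#    'relu0_output': (0, 1.87)}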
def _load_sym(sym, logger=None):
"""Given a str as a path the symbol .json file or a symbol, returns a Symbol object."""
if isinstance(sym, str): # sym is a symbol file path
cur_path = os.path.dirname(os.path.realpath(__file__))
symbol_file_path = os.path.join(cur_path, sym)
if logger:
logger.info('Loading symbol from file %s' % symbol_file_path)
return sym_load(symbol_file_path)
elif isinstance(sym, Symbol):
return sym
else:
raise ValueError('_load_sym only accepts Symbol or path to the symbol file,'
' while received type %s' % str(type(sym)))
def _load_params(params, logger=None):
"""Given a str as a path to the .params file or a pair of params,
returns two dictionaries representing arg_params and aux_params.
"""
if isinstance(params, str):
cur_path = os.path.dirname(os.path.realpath(__file__))
param_file_path = os.path.join(cur_path, params)
if logger:
logger.info('Loading params from file %s' % param_file_path)
save_dict = nd_load(param_file_path)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
elif isinstance(params, (tuple, list)) and len(params) == 2:
return params[0], params[1]
else:
raise ValueError('Unsupported params provided. Must be either a path to the param file or'
' a pair of dictionaries representing arg_params and aux_params')
# pylint: disable=super-init-not-called
class _DataIterWrapper(DataIter):
"""DataIter wrapper for general iterator, e.g., gluon dataloader"""
def __init__(self, calib_data):
self._data = calib_data
try:
calib_iter = iter(calib_data)
except TypeError as e:
raise TypeError('calib_data is not a valid iterator. {}'.format(str(e)))
data_example = next(calib_iter)
if isinstance(data_example, (list, tuple)):
data_example = list(data_example)
else:
data_example = [data_example]
# suppose there must be one label in data_example
# TODO(xinyu-intel): little tricky here, need to refactor.
num_data = len(data_example)
assert num_data > 0
# here reshape is to handle the 5D/6D input data
if len(data_example[0].shape) > 4:
data_example[0] = data_example[0].reshape((-1,) + data_example[0].shape[2:])
self.provide_data = [DataDesc(name='data', shape=(data_example[0].shape))]
self.provide_data += [DataDesc(name='data{}'.format(i), shape=x.shape) for i, x in enumerate(data_example[1:])]
# data0, data1, ..., label
if num_data >= 3:
self.provide_data = [DataDesc(name='data{}'.format(i), shape=x.shape)
for i, x in enumerate(data_example[0:])]
self.batch_size = data_example[0].shape[0]
self.reset()
def reset(self):
self._iter = iter(self._data)
def next(self):
next_data = next(self._iter)
# here reshape is to handle the 5D/6D input data
if len(next_data[0].shape) > 4:
next_data[0] = next_data[0].reshape((-1,) + next_data[0].shape[2:])
return DataBatch(data=next_data)
# pylint: enable=super-init-not-called
def _as_data_iter(calib_data):
"""Convert normal iterator to mx.io.DataIter while parsing the data_shapes"""
if isinstance(calib_data, DataIter):
# already validated DataIter, just return
return calib_data, calib_data.provide_data
calib_data = _DataIterWrapper(calib_data)
return calib_data, calib_data.provide_data
def quantize_model(sym, arg_params, aux_params,
data_names=('data',), label_names=('softmax_label',),
ctx=cpu(), excluded_sym_names=None, excluded_op_names=None, calib_mode='entropy',
calib_data=None, num_calib_examples=None,
quantized_dtype='int8', quantize_mode='smart',
quantize_granularity='tensor-wise', logger=None):
"""User-level API for generating a quantized model from a FP32 model w/ or w/o calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
The quantization implementation adopts the TensorFlow's approach:
https://www.tensorflow.org/performance/quantization.
The calibration implementation borrows the idea of Nvidia's 8-bit Inference with TensorRT:
http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
and adapts the method to MXNet.
Parameters
----------
sym : str or Symbol
Defines the structure of a neural network for FP32 data types.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
data_names : a list of strs
Data names required for creating a Module object to run forward propagation on the
calibration dataset.
label_names : a list of strs
Label names required for creating a Module object to run forward propagation on the
calibration dataset.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
    excluded_sym_names : list of strings
        A list of strings representing the names of the symbols that users want to exclude
        from being quantized.
    excluded_op_names : list of strings
        A list of strings representing the names of the operators that users want to exclude
        from being quantized.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
calib_data : DataIter
A data iterator initialized by the calibration dataset.
    num_calib_examples : int or None
        The maximum number of examples that the user would like to use for calibration. If not
        provided, the whole calibration dataset will be used.
    quantized_dtype : str
        The quantized destination type for input data. Currently supports 'int8', 'uint8'
        and 'auto'. 'auto' means automatically select output type according to calibration result.
        Default value is 'int8'.
    quantize_mode : str
        The mode for the quantization pass to apply. Supports 'full' and 'smart'.
        'full' means quantize all operators if possible.
        'smart' means the quantization pass will smartly choose which operators should be quantized.
quantize_granularity: str
The granularity of quantization, currently supports 'tensor-wise' and 'channel-wise'
quantization. The default value is 'tensor-wise'.
logger : Object
A logging object for printing information during the process of quantization.
    Returns
    -------
    tuple
        A tuple of quantized symbol, quantized arg_params, and aux_params.
    """
if excluded_sym_names is None:
excluded_sym_names = []
if not isinstance(excluded_sym_names, list):
raise ValueError('excluded_sym_names must be a list of strings representing'
' the names of the symbols that will not be quantized,'
' while received type %s' % str(type(excluded_sym_names)))
if excluded_op_names is None:
excluded_op_names = []
if not isinstance(excluded_op_names, list):
raise ValueError('excluded_op_names must be a list of strings representing'
' the names of the operators that will not be quantized,'
' while received type %s' % str(type(excluded_op_names)))
if logger:
os.environ['MXNET_QUANTIZATION_VERBOSE'] = '1'
logger.info('Quantizing symbol')
if quantized_dtype not in ('int8', 'uint8', 'auto'):
raise ValueError('unknown quantized_dtype %s received,'
' expected `int8`, `uint8` or `auto`' % quantized_dtype)
if quantize_granularity not in ('tensor-wise', 'channel-wise'):
        raise ValueError('unknown quantize_granularity %s received,'
                         ' expected `tensor-wise` or `channel-wise`.' % quantize_granularity)
qsym, calib_layer = _quantize_symbol(sym, ctx, excluded_symbols=excluded_sym_names,
excluded_operators=excluded_op_names,
offline_params=list(arg_params.keys()),
quantized_dtype=quantized_dtype,
quantize_mode=quantize_mode,
quantize_granularity=quantize_granularity)
th_dict = {}
if calib_mode is not None and calib_mode != 'none':
if not isinstance(ctx, Context):
raise ValueError('currently only supports single ctx, while received %s' % str(ctx))
if calib_data is None:
raise ValueError('calib_data must be provided when calib_mode=%s' % calib_mode)
if not isinstance(calib_data, DataIter):
raise ValueError('calib_data must be of DataIter type when calib_mode=%s,'
' while received type %s' % (calib_mode, str(type(calib_data))))
mod = Module(symbol=sym, data_names=data_names, label_names=label_names, context=ctx)
if len(calib_data.provide_label) > 0:
mod.bind(for_training=False, data_shapes=calib_data.provide_data,
label_shapes=calib_data.provide_label)
else:
mod.bind(for_training=False, data_shapes=calib_data.provide_data)
mod.set_params(arg_params, aux_params)
if calib_mode == 'entropy':
hist_dict, num_examples = _collect_layer_histogram(mod, calib_data,
include_layer=calib_layer,
max_num_examples=num_calib_examples,
logger=logger)
if logger:
logger.info('Collected layer outputs from FP32 model using %d examples' % num_examples)
logger.info('Calculating optimal thresholds for quantization')
th_dict = _get_optimal_thresholds(hist_dict, quantized_dtype, logger=logger)
elif calib_mode == 'naive':
th_dict, num_examples = _collect_layer_output_min_max(
mod, calib_data, quantized_dtype, include_layer=calib_layer, max_num_examples=num_calib_examples,
logger=logger)
if logger:
logger.info('Collected layer output min/max values from FP32 model using %d examples'
% num_examples)
else:
raise ValueError('unknown calibration mode %s received,'
' expected `none`, `naive`, or `entropy`' % calib_mode)
qsym = _calibrate_quantized_sym(qsym, th_dict)
if logger:
logger.info('Quantizing parameters')
qarg_params = _quantize_params(qsym, arg_params, th_dict)
return qsym, qarg_params, aux_params
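# A minimal usage sketch (not part of the original module); the checkpoint
# prefix and the calibration record file below are hypothetical placeholders.
def _example_quantize_model_usage():
    import mxnet as mx
    sym, arg_params, aux_params = mx.model.load_checkpoint('resnet', 0)
    calib_iter = mx.io.ImageRecordIter(path_imgrec='calib.rec', batch_size=32,
                                       data_shape=(3, 224, 224))
    return quantize_model(sym, arg_params, aux_params, ctx=cpu(),
                          calib_mode='entropy', calib_data=calib_iter,
                          num_calib_examples=320)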
def quantize_model_mkldnn(sym, arg_params, aux_params,
data_names=('data',), label_names=('softmax_label',),
ctx=cpu(), excluded_sym_names=None, excluded_op_names=None,
calib_mode='entropy', calib_data=None, num_calib_examples=None,
quantized_dtype='int8', quantize_mode='smart',
quantize_granularity='tensor-wise', logger=None):
"""User-level API for generating a fusion + quantized model from a FP32 model
w/ or w/o calibration with Intel MKL-DNN.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
    Parameters
    ----------
    Same as quantize_model.
    Returns
    -------
    tuple
        A tuple of quantized symbol, quantized arg_params, and aux_params.
    """
if not isinstance(ctx, Context):
raise ValueError('currently only supports single ctx, while received %s' % str(ctx))
if ctx.device_type != 'cpu':
        raise ValueError(
            'quantize_model_mkldnn only supports the Intel CPU platform with the MKL-DNN backend')
sym = sym.get_backend_symbol('MKLDNN_QUANTIZE')
qsym, qarg_params, aux_params = quantize_model(sym=sym, arg_params=arg_params, aux_params=aux_params,
data_names=data_names, label_names=label_names,
ctx=ctx, excluded_sym_names=excluded_sym_names,
excluded_op_names=excluded_op_names,
calib_mode=calib_mode, calib_data=calib_data,
num_calib_examples=num_calib_examples,
quantized_dtype=quantized_dtype, quantize_mode=quantize_mode,
quantize_granularity=quantize_granularity, logger=logger)
qsym = qsym.get_backend_symbol('MKLDNN_QUANTIZE')
return qsym, qarg_params, aux_params
def quantize_graph(sym, arg_params, aux_params, ctx=cpu(),
excluded_sym_names=None, excluded_op_names=None,
calib_mode='entropy', quantized_dtype='int8',
quantize_mode='full', quantize_granularity='tensor-wise',
LayerOutputCollector=None, logger=None):
"""User-level API for generating a quantized model from a FP32 model w/o calibration
and a collector for naive or entropy calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
Parameters
----------
sym : str or Symbol
Defines the structure of a neural network for FP32 data types.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
    excluded_sym_names : list of strings
        A list of strings representing the names of the symbols that users want to exclude
        from being quantized.
    excluded_op_names : list of strings
        A list of strings representing the names of the operators that users want to exclude
        from being quantized.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
    quantized_dtype : str
        The quantized destination type for input data. Currently supports 'int8', 'uint8'
        and 'auto'. 'auto' means automatically select output type according to calibration result.
        Default value is 'int8'.
    quantize_mode : str
        The mode for the quantization pass to apply. Supports 'full' and 'smart'.
        'full' means quantize all operators if possible.
        'smart' means the quantization pass will smartly choose which operators should be quantized.
quantize_granularity: str
The granularity of quantization, currently supports 'tensor-wise' and 'channel-wise'
quantization. The default value is 'tensor-wise'.
    LayerOutputCollector : class
        For customized calibration method usage.
logger : Object
A logging object for printing information during the process of quantization.
    Returns
    -------
    tuple
        A tuple of quantized symbol, quantized arg_params, aux_params and collector.
    """
if excluded_sym_names is None:
excluded_sym_names = []
if not isinstance(excluded_sym_names, list):
raise ValueError('excluded_sym_names must be a list of strings representing'
' the names of the symbols that will not be quantized,'
' while received type %s' % str(type(excluded_sym_names)))
if not isinstance(ctx, Context):
raise ValueError('currently only supports single ctx, while received %s' % str(ctx))
if logger:
os.environ['MXNET_QUANTIZATION_VERBOSE'] = '1'
logger.info('Quantizing graph')
if quantized_dtype not in ('int8', 'uint8', 'auto'):
raise ValueError('unknown quantized_dtype %s received,'
' expected `int8`, `uint8` or `auto`' % quantized_dtype)
if quantize_granularity not in ('tensor-wise', 'channel-wise'):
        raise ValueError('unknown quantize_granularity %s received,'
                         ' expected `tensor-wise` or `channel-wise`.' % quantize_granularity)
qsym, calib_layer = _quantize_symbol(sym, ctx, excluded_symbols=excluded_sym_names,
excluded_operators=excluded_op_names,
offline_params=list(
arg_params.keys()),
quantized_dtype=quantized_dtype,
quantize_mode=quantize_mode,
quantize_granularity=quantize_granularity)
th_dict = {}
collector = None
if calib_mode is not None and calib_mode != 'none':
if calib_mode == 'entropy':
collector = _LayerHistogramCollector(
include_layer=calib_layer, logger=logger)
if logger:
logger.info(
'Create a layer output collector for entropy calibration.')
elif calib_mode == 'naive':
collector = _LayerOutputMinMaxCollector(quantized_dtype=quantized_dtype,
include_layer=calib_layer, logger=logger)
if logger:
logger.info(
'Create a layer output minmax collector for naive calibration')
elif calib_mode == 'customize' and LayerOutputCollector is not None:
collector = LayerOutputCollector
if logger:
logger.info(
'Create a customize layer output minmax collector for calibration')
else:
raise ValueError('unknown calibration mode %s received,'
' expected `none`, `naive`, `entropy` or `customize`' % calib_mode)
if logger:
logger.info('Collector created, please use set_monitor_callback'
' to collect calibration information.')
if logger:
logger.info('Quantizing parameters')
qarg_params = _quantize_params(qsym, arg_params, th_dict)
return qsym, qarg_params, aux_params, collector
def calib_graph(qsym, arg_params, aux_params, collector,
calib_mode='entropy', quantized_dtype='int8', logger=logging):
"""User-level API for calibrating a quantized model using a filled collector.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
Parameters
----------
qsym : str or Symbol
Defines the structure of a neural network for INT8 data types.
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
collector : function
layer collector for naive or entropy calibration.
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
    quantized_dtype : str
        The quantized destination type for input data. Currently supports 'int8', 'uint8'
        and 'auto'. 'auto' means automatically select output type according to calibration result.
        Default value is 'int8'.
logger : Object
A logging object for printing information during the process of quantization.
    Returns
    -------
    tuple
        A tuple of calibrated symbol, quantized arg_params, aux_params.
    """
th_dict = {}
if calib_mode is not None and calib_mode != 'none':
if calib_mode == 'entropy':
if logger:
logger.info('Calculating optimal thresholds for quantization')
th_dict = _get_optimal_thresholds(
collector.hist_dict, quantized_dtype, logger=logger)
elif calib_mode == 'naive':
th_dict = collector.min_max_dict
elif calib_mode == 'customize':
th_dict = collector.min_max_dict
else:
raise ValueError('unknown calibration mode %s received,'
' expected `none`, `naive`, `entropy` or `customize`' % calib_mode)
qsym = _calibrate_quantized_sym(qsym, th_dict)
else:
raise ValueError('please set calibration mode to naive or entropy.')
if logger:
logger.info('Quantizing parameters')
qarg_params = _quantize_params(qsym, arg_params, th_dict)
return qsym, qarg_params, aux_params
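# A minimal sketch (not part of the original module) of the split
# quantize/collect/calibrate flow that quantize_net_v2 below automates;
# `data_shapes` and `calib_data` are caller-supplied placeholders.
def _example_quantize_then_calibrate(sym, arg_params, aux_params, data_shapes, calib_data):
    qsym, _qargs, _auxs, collector = quantize_graph(
        sym=sym, arg_params=arg_params, aux_params=aux_params,
        ctx=cpu(), calib_mode='naive')
    mod = Module(symbol=sym, data_names=[d.name for d in data_shapes],
                 label_names=None, context=cpu())
    mod.bind(for_training=False, data_shapes=data_shapes)
    mod.set_params(arg_params, aux_params)
    _collect_layer_statistics(mod, calib_data, collector, max_num_examples=500)
    return calib_graph(qsym=qsym, arg_params=arg_params, aux_params=aux_params,
                       collector=collector, calib_mode='naive')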
def quantize_net_v2(network, quantized_dtype='auto', quantize_mode='full', quantize_granularity='tensor-wise',
exclude_layers=None, exclude_layers_match=None, exclude_operators=None,
calib_data=None, data_shapes=None, calib_mode='none',
num_calib_examples=None, ctx=cpu(), LayerOutputCollector=None, logger=None):
"""User-level API for Gluon users to generate a quantized SymbolBlock from a FP32 HybridBlock w/ or w/o calibration.
The backend quantized operators are only enabled for Linux systems. Please do not run
inference using the quantized models on Windows for now.
Parameters
----------
network : Gluon HybridBlock
Defines the structure of a neural network for FP32 data types.
    quantized_dtype : str
        The quantized destination type for input data. Currently supports 'int8', 'uint8'
        and 'auto'. 'auto' means automatically select output type according to calibration result.
        Default value is 'int8'.
    quantize_mode : str
        The mode for the quantization pass to apply. Supports 'full' and 'smart'.
        'full' means quantize all operators if possible.
        'smart' means the quantization pass will smartly choose which operators should be quantized.
quantize_granularity: str
The granularity of quantization, currently supports 'tensor-wise' and 'channel-wise'
quantization. The default value is 'tensor-wise'.
    exclude_layers : list of strings
        A list of strings representing the names of the symbols that users want to exclude
        from being quantized.
    exclude_layers_match : list of strings
        A list of strings wildcard-matching the names of the symbols that users want to exclude
        from being quantized.
    exclude_operators : list of strings
        A list of strings representing the names of the operators that users want to exclude
        from being quantized.
    calib_data : mx.io.DataIter or gluon.DataLoader
        An iterable data loading object.
data_shapes : list
List of DataDesc, required if calib_data is not provided
calib_mode : str
If calib_mode='none', no calibration will be used and the thresholds for
requantization after the corresponding layers will be calculated at runtime by
calling min and max operators. The quantized models generated in this
mode are normally 10-20% slower than those with calibrations during inference.
If calib_mode='naive', the min and max values of the layer outputs from a calibration
dataset will be directly taken as the thresholds for quantization.
If calib_mode='entropy' (default mode), the thresholds for quantization will be
derived such that the KL divergence between the distributions of FP32 layer outputs and
quantized layer outputs is minimized based upon the calibration dataset.
    num_calib_examples : int or None
        The maximum number of examples that the user would like to use for calibration. If not
        provided, the whole calibration dataset will be used.
ctx : Context
Defines the device that users want to run forward propagation on the calibration
dataset for collecting layer output statistics. Currently, only supports single context.
    LayerOutputCollector : class
        For customized calibration method usage.
logger : Object
A logging object for printing information during the process of quantization.
    Returns
    -------
    network : Gluon SymbolBlock
        Defines the structure of a neural network for INT8 data types.
    """
if logger:
logger.info('Export HybridBlock')
network.hybridize()
import mxnet as mx
if calib_data is not None:
if isinstance(calib_data, DataIter):
dshapes = calib_data.provide_data
else:
calib_data, dshapes = _as_data_iter(calib_data)
if not data_shapes:
data_shapes = dshapes
if not data_shapes:
raise ValueError('data_shapes required')
data_nd = []
for shape in data_shapes:
data_nd.append(mx.nd.zeros(shape.shape))
while True:
try:
network(*data_nd)
except TypeError:
del data_nd[-1]
del calib_data.provide_data[-1]
continue
else:
break
import tempfile
try:
from tempfile import TemporaryDirectory
except ImportError:
# really simple implementation of TemporaryDirectory
class TemporaryDirectory(object):
def __init__(self, suffix='', prefix='', dir=''):
self._dirname = tempfile.mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self._dirname
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self._dirname)
# TODO(xinyu-intel): tmp solution to save and reload for mxnet.mod.Module.
# will enhance `export` function to return `sym, args, auxs` directly.
with TemporaryDirectory() as tmpdirname:
prefix = os.path.join(tmpdirname, 'tmp')
network.export(prefix, epoch=0)
symnet, args, auxs = mx.model.load_checkpoint(prefix, 0)
if exclude_layers is None:
exclude_layers = []
if exclude_layers_match is None:
exclude_layers_match = []
if exclude_operators is None:
exclude_operators = []
for name_match in exclude_layers_match:
for layers in list(symnet.get_internals()):
if layers.name.find(name_match) != -1:
exclude_layers.append(layers.name)
if logger:
logger.info('These layers have been excluded %s' % exclude_layers)
if ctx == mx.cpu():
symnet = symnet.get_backend_symbol('MKLDNN_QUANTIZE')
qsym, qarg_params, aux_params, collector = quantize_graph(
sym=symnet, arg_params=args, aux_params=auxs, ctx=ctx,
excluded_sym_names=exclude_layers, excluded_op_names=exclude_operators,
calib_mode=calib_mode, quantized_dtype=quantized_dtype, quantize_mode=quantize_mode,
quantize_granularity=quantize_granularity, LayerOutputCollector=LayerOutputCollector,
logger=logger)
if calib_mode is not None and calib_mode != 'none':
if not isinstance(ctx, Context):
raise ValueError(
'currently only supports single ctx, while received %s' % str(ctx))
if calib_data is None:
raise ValueError(
'calib_data must be provided when calib_mode=%s' % calib_mode)
if calib_mode in ['naive', 'entropy', 'customize']:
data_names = [pair[0] for pair in calib_data.provide_data]
mod = Module(symbol=symnet, context=ctx,
data_names=data_names, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
num_examples = _collect_layer_statistics(mod, calib_data, collector,
num_calib_examples, logger)
if logger:
logger.info('Collected layer output values from FP32 model using %d examples'
% num_examples)
qsym, qarg_params, aux_params = calib_graph(
qsym=qsym, arg_params=args, aux_params=auxs, collector=collector,
calib_mode=calib_mode, quantized_dtype=quantized_dtype, logger=logger)
else:
raise ValueError(
'please set calibration mode to naive or entropy.')
elif calib_mode is not None and calib_mode == 'none':
data_names = [pair[0] for pair in data_shapes]
if ctx == mx.cpu():
qsym = qsym.get_backend_symbol('MKLDNN_QUANTIZE')
from ..gluon import SymbolBlock
data_sym = []
for name in data_names:
data_sym.append(mx.sym.var(name))
net = SymbolBlock(qsym, data_sym)
# TODO(xinyu-intel): tmp solution to save param_dict and reload for SymbolBlock
# will enhance SymbolBlock to load args, auxs directly.
with TemporaryDirectory() as tmpdirname:
prefix = os.path.join(tmpdirname, 'tmp')
param_name = '%s-%04d.params' % (prefix + 'net-quantized', 0)
save_dict = {('arg:%s' % k): v.as_in_context(cpu())
for k, v in qarg_params.items()}
save_dict.update({('aux:%s' % k): v.as_in_context(cpu())
for k, v in aux_params.items()})
nd_save(param_name, save_dict)
net.collect_params().load(param_name, cast_dtype=True, dtype_source='saved')
net.collect_params().reset_ctx(ctx)
return net
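# A minimal Gluon usage sketch (not part of the original module); the model
# and the random calibration dataset below are hypothetical placeholders.
def _example_quantize_net_v2_usage():
    import mxnet as mx
    from mxnet.gluon.model_zoo import vision
    net = vision.resnet18_v1(pretrained=True)
    dataset = mx.gluon.data.ArrayDataset(mx.nd.random.uniform(shape=(64, 3, 224, 224)))
    calib_loader = mx.gluon.data.DataLoader(dataset, batch_size=8)
    return quantize_net_v2(net, quantized_dtype='auto', calib_mode='naive',
                           calib_data=calib_loader, num_calib_examples=32,
                           ctx=mx.cpu())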
def quantize_net(network, quantized_dtype='auto', quantize_mode='full',
exclude_layers=None, exclude_layers_match=None, exclude_operators=None,
calib_data=None, data_shapes=None, calib_mode='none',
num_calib_examples=None, ctx=cpu(), logger=None):
"""User-level API for Gluon users to generate a quantized SymbolBlock from a FP32 HybridBlock w/ or w/o calibration.
Will be deprecated after MXNet 2.0, please use quantize_net_v2.
"""
warnings.warn('WARNING: This will be deprecated after MXNet 2.0, please use quantize_net_v2.')
return quantize_net_v2(network=network, quantized_dtype=quantized_dtype,
quantize_mode=quantize_mode,
quantize_granularity='tensor-wise',
exclude_layers=exclude_layers,
exclude_layers_match=exclude_layers_match,
exclude_operators=exclude_operators,
calib_data=calib_data, data_shapes=data_shapes,
calib_mode=calib_mode, num_calib_examples=num_calib_examples,
ctx=ctx, LayerOutputCollector=None, logger=logger)
|
[] |
[] |
[
"MXNET_QUANTIZATION_VERBOSE"
] |
[]
|
["MXNET_QUANTIZATION_VERBOSE"]
|
python
| 1 | 0 | |
tiled/client/context.py
|
import contextlib
import enum
import getpass
import os
import secrets
import threading
import urllib.parse
from pathlib import Path, PurePosixPath
import appdirs
import httpx
import msgpack
from ..utils import DictView
from .cache import Revalidate
from .utils import (
ASYNC_EVENT_HOOKS,
DEFAULT_ACCEPTED_ENCODINGS,
NotAvailableOffline,
handle_error,
)
DEFAULT_TOKEN_CACHE = os.getenv(
"TILED_TOKEN_CACHE", os.path.join(appdirs.user_config_dir("tiled"), "tokens")
)
def _token_directory(token_cache, netloc):
return Path(
token_cache,
urllib.parse.quote_plus(
netloc.decode()
), # Make a valid filename out of hostname:port.
)
def logout(uri_or_profile, *, token_cache=DEFAULT_TOKEN_CACHE):
"""
Logout of a given session.
If not logged in, calling this function has no effect.
Parameters
----------
uri_or_profile : str
    token_cache : str or Path, optional
Returns
-------
netloc : str
"""
    if isinstance(token_cache, (str, Path)):
        netloc = _netloc_from_uri_or_profile(uri_or_profile)
        directory = _token_directory(token_cache, netloc)
        token_cache = TokenCache(directory)
    else:
        netloc = None  # unknowable
    token_cache.pop("refresh_token", None)
    return netloc
def sessions(token_directory=DEFAULT_TOKEN_CACHE):
"""
List all sessions.
Note that this may include expired sessions. It does not confirm that
any cached tokens are still valid.
Parameters
----------
token_directory : str or Path, optional
Returns
-------
tokens : dict
Maps netloc to refresh_token
"""
tokens = {}
for directory in Path(token_directory).iterdir():
if not directory.is_dir():
# Some stray file. Ignore it.
continue
refresh_token_file = directory / "refresh_token"
netloc = directory.name
if refresh_token_file.is_file():
with open(refresh_token_file) as file:
token = file.read()
tokens[netloc] = token
return tokens
def logout_all(token_directory=DEFAULT_TOKEN_CACHE):
"""
    Logout of all sessions.
If not logged in to any sessions, calling this function has no effect.
Parameters
----------
token_directory : str or Path, optional
Returns
-------
logged_out_from : list
List of netloc of logged-out sessions
"""
logged_out_from = []
for directory in Path(token_directory).iterdir():
if not directory.is_dir():
# Some stray file. Ignore it.
continue
refresh_token_file = directory / "refresh_token"
if refresh_token_file.is_file():
refresh_token_file.unlink()
netloc = directory.name
logged_out_from.append(netloc)
return logged_out_from
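# A minimal sketch (not part of the original module; the server address is
# hypothetical): inspect cached sessions, then log out of one or all of them.
def _example_session_management():
    print(sessions())         # e.g. {'localhost%3A8000': '<refresh token>'}
    logout("http://localhost:8000")
    return logout_all()       # e.g. ['localhost%3A8000']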
def _netloc_from_uri_or_profile(uri_or_profile):
if uri_or_profile.startswith("http://") or uri_or_profile.startswith("https://"):
# This looks like a URI.
uri = uri_or_profile
else:
# Is this a profile name?
from ..profiles import load_profiles
profiles = load_profiles()
if uri_or_profile in profiles:
profile_name = uri_or_profile
_, profile_content = profiles[profile_name]
if "uri" in profile_content:
uri = profile_content["uri"]
else:
raise ValueError(
"Logout does not apply to profiles with inline ('direct') "
"server configuration."
)
else:
raise ValueError(
f"Not sure what to do with tree {uri_or_profile!r}. "
"It does not look like a URI (it does not start with http[s]://) "
"and it does not match any profiles."
)
return httpx.URL(uri).netloc
class CannotRefreshAuthentication(Exception):
pass
class PromptForReauthentication(enum.Enum):
AT_INIT = "at_init"
NEVER = "never"
ALWAYS = "always"
class Context:
"""
Wrap an httpx.Client with an optional cache and authentication functionality.
"""
def __init__(
self,
client,
*,
authentication_uri=None,
username=None,
cache=None,
offline=False,
token_cache=DEFAULT_TOKEN_CACHE,
prompt_for_reauthentication=PromptForReauthentication.AT_INIT,
app=None,
):
authentication_uri = authentication_uri or "/"
if not authentication_uri.endswith("/"):
authentication_uri += "/"
self._client = client
self._authentication_uri = authentication_uri
self._cache = cache
self._revalidate = Revalidate.IF_WE_MUST
self._username = username
self._offline = offline
self._token_cache_or_root_directory = token_cache
self._prompt_for_reauthentication = PromptForReauthentication(
prompt_for_reauthentication
)
self._refresh_lock = threading.Lock()
if isinstance(token_cache, (str, Path)):
directory = _token_directory(token_cache, self._client.base_url.netloc)
token_cache = TokenCache(directory)
self._token_cache = token_cache
        # The token *cache* is optional. The tokens attribute is always present,
# and it isn't actually used for anything internally. It's just a view
# of the current tokens.
self._tokens = {}
self._app = app
# Make an initial "safe" request to let the server set the CSRF cookie.
# https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie
if offline:
self._handshake_data = self.get_json(self._authentication_uri)
else:
# We need a CSRF token.
with self.disable_cache(allow_read=False, allow_write=True):
self._handshake_data = self.get_json(self._authentication_uri)
# Ask the server what its root_path is.
if (not offline) and (
self._handshake_data["authentication"]["required"] or (username is not None)
):
if self._handshake_data["authentication"]["type"] in (
"password",
"external",
):
# Authenticate. If a valid refresh_token is available in the token_cache,
# it will be used. Otherwise, this will prompt for input from the stdin
# or raise CannotRefreshAuthentication.
prompt = (
prompt_for_reauthentication == PromptForReauthentication.AT_INIT
or prompt_for_reauthentication == PromptForReauthentication.ALWAYS
)
tokens = self.reauthenticate(prompt=prompt)
access_token = tokens["access_token"]
client.headers["Authorization"] = f"Bearer {access_token}"
base_path = self._handshake_data["meta"]["root_path"]
url = httpx.URL(self._client.base_url)
base_url = urllib.parse.urlunsplit(
(url.scheme, url.netloc.decode(), base_path, {}, url.fragment)
)
client.base_url = base_url
path_parts = list(PurePosixPath(url.path).relative_to(base_path).parts)
if path_parts:
# Strip "/node/metadata"
path_parts.pop(1)
self._path_parts = path_parts
@property
def tokens(self):
"A view of the current access and refresh tokens."
return DictView(self._tokens)
@property
def cache(self):
return self._cache
@property
def offline(self):
return self._offline
@offline.setter
def offline(self, value):
self._offline = bool(value)
if not self._offline:
# We need a CSRF token.
with self.disable_cache(allow_read=False, allow_write=True):
self._handshake_data = self.get_json(self._authentication_uri)
@property
def app(self):
return self._app
@property
def path_parts(self):
return self._path_parts
@property
def base_url(self):
return self._client.base_url
@property
def event_hooks(self):
"httpx.Client event hooks. This is exposed for testing."
return self._client.event_hooks
@property
def revalidate(self):
"""
This controls how aggressively to check whether cache entries are out of date.
- FORCE: Always revalidate (generally too aggressive and expensive)
- IF_EXPIRED: Revalidate if the "Expire" date provided by the server has passed
- IF_WE_MUST: Only revalidate if the server indicated that is is a
particularly volatile entry, such as a search result to a dynamic query.
"""
return self._revalidate
@revalidate.setter
def revalidate(self, value):
self._revalidate = Revalidate(value)
@contextlib.contextmanager
def revalidation(self, revalidate):
"""
Temporarily change the 'revalidate' property in a context.
Parameters
----------
revalidate: string or tiled.client.cache.Revalidate enum member
"""
try:
member = Revalidate(revalidate)
except ValueError as err:
# This raises a more helpful error that lists the valid options.
raise ValueError(
f"Revalidation {revalidate} not recognized. Must be one of {set(Revalidate.__members__)}"
) from err
original = self.revalidate
self.revalidate = member
yield
# Upon leaving context, set it back.
self.revalidate = original
@contextlib.contextmanager
def disable_cache(self, allow_read=False, allow_write=False):
self._disable_cache_read = not allow_read
self._disable_cache_write = not allow_write
yield
self._disable_cache_read = False
self._disable_cache_write = False
def get_content(self, path, accept=None, stream=False, revalidate=None, **kwargs):
if revalidate is None:
# Fallback to context default.
revalidate = self.revalidate
request = self._client.build_request("GET", path, **kwargs)
if accept:
request.headers["Accept"] = accept
url = request.url
if self._offline:
# We must rely on the cache alone.
# The role of a 'reservation' is to ensure that the content
# of interest is not evicted from the cache between the moment
# that we start verifying its validity and the moment that
# we actually read the content. It is used more extensively
# below.
reservation = self._cache.get_reservation(url)
if reservation is None:
raise NotAvailableOffline(url)
content = reservation.load_content()
if content is None:
# TODO Do we ever get here?
raise NotAvailableOffline(url)
return content
if self._cache is None:
# No cache, so we can use the client straightforwardly.
response = self._send(request, stream=stream)
handle_error(response)
if response.headers.get("content-encoding") == "blosc":
import blosc
return blosc.decompress(response.content)
return response.content
# If we get this far, we have an online client and a cache.
# Parse Cache-Control header directives.
cache_control = {
directive.lstrip(" ")
for directive in request.headers.get("Cache-Control", "").split(",")
}
if "no-cache" in cache_control:
reservation = None
else:
reservation = self._cache.get_reservation(url)
try:
if reservation is not None:
is_stale = reservation.is_stale()
if not (
# This condition means "client user wants us to unconditionally revalidate"
(revalidate == Revalidate.FORCE)
or
# This condition means "client user wants us to revalidate if expired"
(is_stale and (revalidate == Revalidate.IF_EXPIRED))
or
# This condition means "server really wants us to revalidate"
(is_stale and reservation.item.must_revalidate)
or self._disable_cache_read
):
# Short-circuit. Do not even bother consulting the server.
return reservation.load_content()
if not self._disable_cache_read:
request.headers["If-None-Match"] = reservation.item.etag
response = self._send(request, stream=stream)
handle_error(response)
if response.status_code == 304: # HTTP 304 Not Modified
# Update the expiration time.
reservation.renew(response.headers.get("expires"))
# Read from the cache
return reservation.load_content()
elif not response.is_error:
etag = response.headers.get("ETag")
encoding = response.headers.get("Content-Encoding")
content = response.content
# httpx handles standard HTTP encodings transparently, but we have to
# handle "blosc" manually.
if encoding == "blosc":
import blosc
content = blosc.decompress(content)
if (
("no-store" not in cache_control)
and (etag is not None)
and (not self._disable_cache_write)
):
# Write to cache.
self._cache.put(
url,
response.headers,
content,
)
return content
else:
raise NotImplementedError(
f"Unexpected status_code {response.status_code}"
)
finally:
if reservation is not None:
reservation.ensure_released()
def get_json(self, path, stream=False, **kwargs):
return msgpack.unpackb(
self.get_content(
path, accept="application/x-msgpack", stream=stream, **kwargs
),
timestamp=3, # Decode msgpack Timestamp as datetime.datetime object.
)
def _send(self, request, stream=False, attempts=0):
"""
If sending results in an authentication error, reauthenticate.
"""
response = self._client.send(request, stream=stream)
if (response.status_code == 401) and (attempts == 0):
# Try refreshing the token.
tokens = self.reauthenticate()
# The line above updated self._client.headers["authorization"]
# so we will have a fresh token for the next call to
# client.build_request(...), but we need to retroactively patch the
# authorization header for this request and then re-send.
access_token = tokens["access_token"]
auth_header = f"Bearer {access_token}"
request.headers["authorization"] = auth_header
return self._send(request, stream=stream, attempts=1)
return response
def authenticate(self):
"Authenticate. Prompt for password or access code (refresh token)."
auth_type = self._handshake_data["authentication"]["type"]
if auth_type == "password":
username = self._username or input("Username: ")
password = getpass.getpass()
form_data = {
"grant_type": "password",
"username": username,
"password": password,
}
token_request = self._client.build_request(
"POST",
f"{self._authentication_uri}auth/token",
data=form_data,
headers={},
)
token_request.headers.pop("Authorization", None)
token_response = self._client.send(token_request)
handle_error(token_response)
tokens = token_response.json()
refresh_token = tokens["refresh_token"]
elif auth_type == "external":
endpoint = self._handshake_data["authentication"]["endpoint"]
print(
f"""
Navigate web browser to this address to obtain access code:
{endpoint}
"""
)
while True:
# The proper term for this is 'refresh token' but that may be
# confusing jargon to the end user, so we say "access code".
raw_refresh_token = getpass.getpass("Access code (quotes optional): ")
if not raw_refresh_token:
print("No access token given. Failed.")
break
# Remove any accidentally-included quotes.
refresh_token = raw_refresh_token.replace('"', "")
# Immediately refresh to (1) check that the copy/paste worked and
# (2) obtain an access token as well.
try:
tokens = self._refresh(refresh_token=refresh_token)
except CannotRefreshAuthentication:
print(
"That didn't work. Try pasting the access code again, or press Enter to escape."
)
else:
break
confirmation_message = self._handshake_data["authentication"][
"confirmation_message"
]
if confirmation_message:
            username = self.whoami()
print(confirmation_message.format(username=username))
elif auth_type == "api_key":
raise ValueError(
"authenticate() method is not applicable to API key authentication"
)
else:
raise ValueError(f"Server has unknown authentication type {auth_type!r}")
if self._token_cache is not None:
# We are using a token cache. Store the new refresh token.
self._token_cache["refresh_token"] = refresh_token
self._tokens.update(
refresh_token=tokens["refresh_token"], access_token=tokens["access_token"]
)
return tokens
def reauthenticate(self, prompt=None):
"""
Refresh authentication.
Parameters
----------
prompt : bool
If True, give interactive prompt for authentication when refreshing
tokens fails. If False raise an error. If None, fall back
to default `prompt_for_reauthentication` set in Context.__init__.
"""
try:
return self._refresh()
except CannotRefreshAuthentication:
if prompt is None:
prompt = self._prompt_for_reauthentication
if prompt:
return self.authenticate()
raise
def whoami(self):
"Return username."
request = self._client.build_request(
"GET", f"{self._authentication_uri}auth/whoami"
)
response = self._client.send(request)
handle_error(response)
return response.json()["username"]
def logout(self):
"""
Clear the access token and the cached refresh token.
This method is idempotent.
"""
self._client.headers.pop("Authorization", None)
if self._token_cache is not None:
self._token_cache.pop("refresh_token", None)
self._tokens.clear()
def _refresh(self, refresh_token=None):
with self._refresh_lock:
if refresh_token is None:
if self._token_cache is None:
# We are not using a token cache.
raise CannotRefreshAuthentication(
"No token cache was given. "
"Provide fresh credentials. "
"For a given client c, use c.context.authenticate()."
)
# We are using a token_cache.
try:
refresh_token = self._token_cache["refresh_token"]
except KeyError:
raise CannotRefreshAuthentication(
"No refresh token was found in token cache. "
"Provide fresh credentials. "
"For a given client c, use c.context.authenticate()."
)
token_request = self._client.build_request(
"POST",
f"{self._authentication_uri}auth/token/refresh",
json={"refresh_token": refresh_token},
# Submit CSRF token in both header and cookie.
# https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie
headers={"x-csrf": self._client.cookies["tiled_csrf"]},
)
token_request.headers.pop("Authorization", None)
token_response = self._client.send(token_request)
if token_response.status_code == 401:
# Refreshing the token failed.
# Discard the expired (or otherwise invalid) refresh_token file.
self._token_cache.pop("refresh_token", None)
raise CannotRefreshAuthentication(
"Server rejected attempt to refresh token. "
"Provide fresh credentials. "
"For a given client c, use c.context.authenticate()."
)
handle_error(token_response)
tokens = token_response.json()
# If we get this far, reauthentication worked.
# Store the new refresh token.
self._token_cache["refresh_token"] = tokens["refresh_token"]
# Update the client's Authentication header.
access_token = tokens["access_token"]
auth_header = f"Bearer {access_token}"
self._client.headers["authorization"] = auth_header
self._tokens.update(
refresh_token=tokens["refresh_token"],
access_token=tokens["access_token"],
)
return tokens
def context_from_tree(
tree,
authentication,
server_settings,
*,
query_registry=None,
serialization_registry=None,
compression_registry=None,
cache=None,
offline=False,
token_cache=DEFAULT_TOKEN_CACHE,
prompt_for_reauthentication=PromptForReauthentication.AT_INIT,
username=None,
headers=None,
):
from ..server.app import serve_tree
# By default make it "public" because there is no way to
# secure access from inside the same process anyway.
authentication = authentication or {"allow_anonymous_access": True}
server_settings = server_settings or {}
params = {}
headers = headers or {}
headers.setdefault("accept-encoding", ",".join(DEFAULT_ACCEPTED_ENCODINGS))
# If a single-user API key will be used, generate the key here instead of
# letting serve_tree do it for us, so that we can give it to the client
# below.
if (
(authentication.get("authenticator") is None)
and (not authentication.get("allow_anonymous_access", False))
and (authentication.get("single_user_api_key") is None)
):
single_user_api_key = os.getenv(
"TILED_SINGLE_USER_API_KEY", secrets.token_hex(32)
)
authentication["single_user_api_key"] = single_user_api_key
params["api_key"] = single_user_api_key
app = serve_tree(
tree,
authentication,
server_settings,
query_registry=query_registry,
serialization_registry=serialization_registry,
compression_registry=compression_registry,
)
# Only an AsyncClient can be used over ASGI.
# We wrap all the async methods in a call to asyncio.run(...).
# Someday we should explore asynchronous Tiled Client objects.
from ._async_bridge import AsyncClientBridge
async def startup():
# Note: This is important. The Tiled server routes are defined lazily on
# startup.
await app.router.startup()
client = AsyncClientBridge(
base_url="http://local-tiled-app",
params=params,
app=app,
_startup_hook=startup,
event_hooks=ASYNC_EVENT_HOOKS,
headers=headers,
timeout=httpx.Timeout(5.0, read=20.0),
)
# Block for application startup.
try:
client.wait_until_ready(10)
except TimeoutError:
raise TimeoutError("Application startup has timed out.")
# TODO How to close the httpx.AsyncClient more cleanly?
import atexit
atexit.register(client.close)
return Context(
client,
cache=cache,
offline=offline,
token_cache=token_cache,
username=username,
prompt_for_reauthentication=prompt_for_reauthentication,
app=app,
)
class TokenCache:
"A (partial) dict interface backed by files with restrictive permissions"
def __init__(self, directory):
self._directory = Path(directory)
self._directory.mkdir(exist_ok=True, parents=True)
def __getitem__(self, key):
filepath = self._directory / key
try:
with open(filepath, "r") as file:
return file.read()
except FileNotFoundError:
raise KeyError(key)
def __setitem__(self, key, value):
if not isinstance(value, str):
raise ValueError("Expected string value, got {value!r}")
filepath = self._directory / key
filepath.touch(mode=0o600) # Set permissions.
with open(filepath, "w") as file:
file.write(value)
def __delitem__(self, key):
filepath = self._directory / key
filepath.unlink(missing_ok=False)
def pop(self, key, fallback=None):
filepath = self._directory / key
try:
with open(filepath, "r") as file:
content = file.read()
except FileNotFoundError:
content = fallback
filepath.unlink(missing_ok=True)
return content
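# A minimal sketch (not part of the original module; the directory is
# hypothetical) of the dict-like TokenCache interface.
def _example_token_cache():
    cache = TokenCache("/tmp/tiled-tokens-example")
    cache["refresh_token"] = "abc123"   # persisted to a file created with mode 0o600
    assert cache["refresh_token"] == "abc123"
    return cache.pop("refresh_token")   # returns the value and deletes the file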
|
[] |
[] |
[
"TILED_TOKEN_CACHE",
"TILED_SINGLE_USER_API_KEY"
] |
[]
|
["TILED_TOKEN_CACHE", "TILED_SINGLE_USER_API_KEY"]
|
python
| 2 | 0 | |
runway/templates/k8s-cfn-repo/k8s-master.cfn/k8s_hooks/bootstrap.py
|
"""Execute the AWS CLI update-kubeconfig command."""
from __future__ import print_function
import os
import logging
import shutil
import six
import yaml
LOGGER = logging.getLogger(__name__)
def copy_template_to_env(path, env, region):
"""Copy k8s module template into new environment directory."""
overlays_dir = os.path.join(path, 'overlays')
template_dir = os.path.join(overlays_dir, 'template')
env_dir = os.path.join(overlays_dir, env)
if os.path.isdir(template_dir):
if os.path.isdir(env_dir) or (
os.path.isdir("%s-%s" % (env_dir, region))):
LOGGER.info("Bootstrap of k8s module at \"%s\" skipped; module "
"already has a config for this environment", path)
else:
LOGGER.info("Copying overlay template at \"%s\" to new "
"environment directory \"%s\"", template_dir, env_dir)
shutil.copytree(template_dir, env_dir, symlinks=True)
# Update templated environment name in files
for i in ['kustomization.yaml',
# namespace files can't be directly kustomized
'namespace.yaml']:
templated_file_path = os.path.join(env_dir, i)
if os.path.isfile(templated_file_path):
with open(templated_file_path, 'r') as stream:
filedata = stream.read()
if 'REPLACEMEENV' in filedata:
filedata = filedata.replace('REPLACEMEENV', env)
with open(templated_file_path, 'w') as stream:
stream.write(filedata)
else:
LOGGER.info("Skipping bootstrap of k8s module at \"%s\"; no template "
"directory present", path)
def create_runway_environments(provider, context, **kwargs): # noqa pylint: disable=unused-argument
"""Copy k8s module templates into new environment directories.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
"""
LOGGER.info("Bootstrapping runway k8s modules, looking for unconfigured "
"environments...")
environment = kwargs['namespace']
region = os.environ.get('AWS_DEFAULT_REGION')
env_root = os.path.dirname(
os.path.realpath(os.environ.get('RUNWAYCONFIG'))
)
with open(os.environ.get('RUNWAYCONFIG')) as data_file:
runway_config = yaml.safe_load(data_file)
for deployment in runway_config.get('deployments', []):
for module in deployment.get('modules', []):
if isinstance(module, six.string_types):
path = module
else:
path = module.get('path')
if path.endswith('.k8s'):
copy_template_to_env(os.path.join(env_root, path),
environment,
region)
return True
|
[] |
[] |
[
"AWS_DEFAULT_REGION",
"RUNWAYCONFIG"
] |
[]
|
["AWS_DEFAULT_REGION", "RUNWAYCONFIG"]
|
python
| 2 | 0 | |
scripts/replace-commands-help.py
|
import dataclasses
import logging
import os
import re
import subprocess
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
commands_md_path = os.path.join(os.path.dirname(__file__), "../docs/commands.md")
@dataclasses.dataclass
class Section:
start: int
end: int
command: str
section: str
code: bool
def find_next_section(lines, start):
begin_pattern = re.compile(r"<!-- BEGIN SECTION \"([^\"]*)\" \"([^\"]*)\" (true|false) -->")
end_pattern = re.compile(r"<!-- END SECTION -->")
for i in range(start, len(lines)):
m = begin_pattern.match(lines[i])
if not m:
continue
command = m.group(1)
section = m.group(2)
code = m.group(3) == "true"
for j in range(i + 1, len(lines)):
m = end_pattern.match(lines[j])
if not m:
continue
return Section(start=i, end=j, command=command, section=section, code=code)
return None
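# The markers this script scans for in docs/commands.md look like the
# following (the command name "deploy" is hypothetical); everything between
# them is regenerated from the `kluctl <command> --help` output:
#
#   <!-- BEGIN SECTION "deploy" "Usage" true -->
#   ...generated help text...
#   <!-- END SECTION -->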
def count_indent(line):
    indent = 0
    for i in range(len(line)):
        if line[i] != " ":
            break
        indent += 1
    return indent
def get_help_section(command, section):
logger.info("Getting section '%s' from command '%s'" % (section, command))
args = ["kluctl"]
if command:
args += [command]
args += ["--help"]
env = {
# This is a good value to be rendered into markdown
"COLUMNS": "120",
**os.environ,
}
r = subprocess.run(args, env=env, capture_output=True, text=True, check=False)
if r.returncode != 0:
logger.error("kluctl call failed with exit code %d\nstdout=%s\nstderr=%s" % (r.returncode, r.stdout, r.stderr))
raise Exception("kluctl call failed with exit code %d" % r.returncode)
lines = r.stdout.splitlines()
section_start = None
for i in range(len(lines)):
indent = count_indent(lines[i])
if lines[i][indent:].startswith("%s:" % section):
section_start = i
break
if section_start is None:
raise Exception("Section %s not found in command %s" % (section, command))
ret = [lines[section_start] + "\n"]
section_indent = count_indent(lines[section_start])
for i in range(section_start + 1, len(lines)):
indent = count_indent(lines[i])
if lines[i] != "" and indent <= section_indent:
break
ret.append(lines[i] + "\n")
return ret
with open(commands_md_path) as f:
lines = f.readlines()
new_lines = []
pos = 0
while True:
s = find_next_section(lines, pos)
if s is None:
new_lines += lines[pos:]
break
new_lines += lines[pos:s.start + 1]
s2 = get_help_section(s.command, s.section)
if s.code:
new_lines += ["```\n"]
new_lines += s2
if s.code:
new_lines += ["```\n"]
new_lines += [lines[s.end]]
pos = s.end + 1
if lines != new_lines:
with open(commands_md_path, mode="w") as f:
f.writelines(new_lines)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ext/tournament/tourn.py
|
import random
from discord.ext.commands import CommandError
class SizeError(CommandError):
pass
class MaxSizeReached(CommandError):
pass
class NoParticipants(CommandError):
pass
class ParticipantNotFound(CommandError):
pass
class ParticipantAlreadyExists(CommandError):
pass
class BracketAlreadyGenerated(CommandError):
pass
class MaxTeamSizeReached(CommandError):
pass
class StrictError(CommandError):
pass
class NotInBracket(CommandError):
pass
class Tourney:
def __init__(self, size : int, team_size : int, hosts : tuple, strict : bool = False):
self._checkSize(size)
self.size = size
self.HOSTS = hosts
self.team_size = team_size
self.strict = strict
self.rounds_completed = 0
self.loser_rounds_completed = 0
self.participants = []
self.winners = []
self.brackets = []
self.loser_brackets = []
self.losers = []
self.team_data = {}
@property
def rounds(self):
return len(self.participants) // 2
def _checkSize(self, size):
if size % 2 != 0 or size <= 0:
raise SizeError
def generate_bracket(self):
if len(self.brackets) > 0:
raise BracketAlreadyGenerated
self._checkSize(len(self.participants))
random.shuffle(self.participants)
self.brackets = [(self.participants[i], self.participants[i + 1]) for i in range(0, len(self.participants) - 1, 2)]
#Stops more people from joining once bracket generated
self.size = len(self.participants)
return self.brackets
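# e.g. with participants ["A", "B", "C", "D"] (after shuffling), the pairing
# above yields brackets like [("A", "B"), ("C", "D")]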
def add_participant(self, name : str, team_members : tuple):
if len(self.participants) == self.size:
raise MaxSizeReached
if len(team_members) > self.team_size:
raise MaxTeamSizeReached
if self.strict and (len(team_members) < self.team_size):
raise StrictError
if name in self.participants:
raise ParticipantAlreadyExists
self.participants.append(name)
self.team_data[name] = {
'members': team_members,
'wins' : 0,
'loses' : 0,
}
def remove_participant(self, name : str):
if len(self.participants) == 0:
raise NoParticipants
if name not in self.participants:
raise ParticipantNotFound
self.participants.remove(name)
self.team_data.pop(name, None)
def winner(self, name : str):
if name in self.winners or name in self.losers:
raise NotInBracket
if name not in self.participants:
raise ParticipantNotFound
# Only count the round once the name has been validated
self.rounds_completed += 1
for i in self.brackets:
t1, t2 = i
if t1.lower() == name.lower():
self.winners.append(t1)
self.losers.append(t2)
self.team_data[t1]['wins'] += 1
self.team_data[t2]['loses'] += 1
self.brackets.remove(i)
break
if t2.lower() == name.lower():
self.winners.append(t2)
self.losers.append(t1)
self.team_data[t2]['wins'] += 1
self.team_data[t1]['loses'] += 1
self.brackets.remove(i)
break
if len(self.winners) >= 2:
x = self.winners[0]
y = self.winners[1]
self.brackets.append((x, y))
self.winners.remove(x)
self.winners.remove(y)
if len(self.losers) >= 2:
x = self.losers[0]
y = self.losers[1]
self.loser_brackets.append((x, y))
self.losers.remove(x)
self.losers.remove(y)
if self.rounds_completed == len(self.participants)-1:
return self.winners[0]
def loser_bracket_winner(self, name : str):
if name in self.losers and len(self.participants)-2 == 0:
return self.losers[0]
if name in self.winners or name in self.losers:
raise NotInBracket
if name not in self.participants:
raise ParticipantNotFound
self.loser_rounds_completed += 1
for i in self.loser_brackets:
t1, t2 = i
if t1.lower() == name.lower():
self.losers.append(t1)
self.team_data[t1]['wins'] += 1
self.team_data[t2]['loses'] += 1
self.loser_brackets.remove(i)
break
if t2.lower() == name.lower():
self.losers.append(t2)
self.team_data[t2]['wins'] += 1
self.team_data[t1]['loses'] += 1
self.loser_brackets.remove(i)
break
if len(self.losers) >= 2:
x = self.losers[0]
y = self.losers[1]
self.loser_brackets.append((x, y))
self.losers.remove(x)
self.losers.remove(y)
if self.loser_rounds_completed == len(self.participants)-2:
return self.losers[0]
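# Minimal usage sketch (all names are hypothetical):
#
# t = Tourney(size=4, team_size=1, hosts=("host1",))
# t.add_participant("A", ("a1",))
# t.add_participant("B", ("b1",))
# t.add_participant("C", ("c1",))
# t.add_participant("D", ("d1",))
# t.generate_bracket()   # e.g. [("A", "B"), ("C", "D")]
# t.winner("A")          # "A" advances, "B" drops to the loser bracket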
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
methods.py
|
import os
import re
import glob
import subprocess
from collections import OrderedDict
# We need to define our own `Action` method to control the verbosity of output
# and whenever we need to run those commands in a subprocess on some platforms.
from SCons.Script import Action
from SCons import Node
from SCons.Script import Glob
from platform_methods import run_in_subprocess
def add_source_files(self, sources, files, warn_duplicates=True):
# Convert string to list of absolute paths (including expanding wildcard)
if isinstance(files, (str, bytes)):
# Keep SCons project-absolute path as they are (no wildcard support)
if files.startswith("#"):
if "*" in files:
print("ERROR: Wildcards can't be expanded in SCons project-absolute path: '{}'".format(files))
return
files = [files]
else:
dir_path = self.Dir(".").abspath
files = sorted(glob.glob(dir_path + "/" + files))
# Add each path as compiled Object following environment (self) configuration
for path in files:
obj = self.Object(path)
if obj in sources:
if warn_duplicates:
print('WARNING: Object "{}" already included in environment sources.'.format(obj))
else:
continue
sources.append(obj)
def disable_warnings(self):
# 'self' is the environment
if self.msvc:
# We have to remove existing warning level defines before appending /w,
# otherwise we get: "warning D9025 : overriding '/W3' with '/w'"
warn_flags = ["/Wall", "/W4", "/W3", "/W2", "/W1", "/WX"]
self.Append(CCFLAGS=["/w"])
self.Append(CFLAGS=["/w"])
self.Append(CXXFLAGS=["/w"])
self["CCFLAGS"] = [x for x in self["CCFLAGS"] if not x in warn_flags]
self["CFLAGS"] = [x for x in self["CFLAGS"] if not x in warn_flags]
self["CXXFLAGS"] = [x for x in self["CXXFLAGS"] if not x in warn_flags]
else:
self.Append(CCFLAGS=["-w"])
self.Append(CFLAGS=["-w"])
self.Append(CXXFLAGS=["-w"])
def add_module_version_string(self, s):
self.module_version_string += "." + s
def update_version(module_version_string=""):
build_name = "custom_build"
if os.getenv("BUILD_NAME") != None:
build_name = os.getenv("BUILD_NAME")
print("Using custom build name: " + build_name)
import version
# NOTE: It is safe to generate this file here, since this is still executed serially
f = open("core/version_generated.gen.h", "w")
f.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
f.write("#ifndef VERSION_GENERATED_GEN_H\n")
f.write("#define VERSION_GENERATED_GEN_H\n")
f.write('#define VERSION_SHORT_NAME "' + str(version.short_name) + '"\n')
f.write('#define VERSION_NAME "' + str(version.name) + '"\n')
f.write("#define VERSION_MAJOR " + str(version.major) + "\n")
f.write("#define VERSION_MINOR " + str(version.minor) + "\n")
f.write("#define VERSION_PATCH " + str(version.patch) + "\n")
f.write('#define VERSION_STATUS "' + str(version.status) + '"\n')
f.write('#define VERSION_BUILD "' + str(build_name) + '"\n')
f.write('#define VERSION_MODULE_CONFIG "' + str(version.module_config) + module_version_string + '"\n')
f.write("#define VERSION_YEAR " + str(version.year) + "\n")
f.write('#define VERSION_WEBSITE "' + str(version.website) + '"\n')
f.write("#endif // VERSION_GENERATED_GEN_H\n")
f.close()
# NOTE: It is safe to generate this file here, since this is still executed serially
fhash = open("core/version_hash.gen.h", "w")
fhash.write("/* THIS FILE IS GENERATED DO NOT EDIT */\n")
fhash.write("#ifndef VERSION_HASH_GEN_H\n")
fhash.write("#define VERSION_HASH_GEN_H\n")
githash = ""
gitfolder = ".git"
if os.path.isfile(".git"):
module_folder = open(".git", "r").readline().strip()
if module_folder.startswith("gitdir: "):
gitfolder = module_folder[8:]
if os.path.isfile(os.path.join(gitfolder, "HEAD")):
head = open(os.path.join(gitfolder, "HEAD"), "r", encoding="utf8").readline().strip()
if head.startswith("ref: "):
head = os.path.join(gitfolder, head[5:])
if os.path.isfile(head):
githash = open(head, "r").readline().strip()
else:
githash = head
fhash.write('#define VERSION_HASH "' + githash + '"\n')
fhash.write("#endif // VERSION_HASH_GEN_H\n")
fhash.close()
def parse_cg_file(fname, uniforms, sizes, conditionals):
fs = open(fname, "r")
line = fs.readline()
while line:
if re.match(r"^\s*uniform", line):
res = re.match(r"uniform ([\d\w]*) ([\d\w]*)")
type = res.groups(1)
name = res.groups(2)
uniforms.append(name)
if type.find("texobj") != -1:
sizes.append(1)
else:
t = re.match(r"float(\d)x(\d)", type)
if t:
sizes.append(int(t.groups(1)) * int(t.groups(2)))
else:
t = re.match(r"float(\d)", type)
sizes.append(int(t.groups(1)))
if line.find("[branch]") != -1:
conditionals.append(name)
line = fs.readline()
fs.close()
def detect_modules(at_path):
module_list = OrderedDict() # name : path
modules_glob = os.path.join(at_path, "*")
files = glob.glob(modules_glob)
files.sort() # so register_module_types does not change that often, and also plugins are registered in alphabetic order
for x in files:
if not is_module(x):
continue
name = os.path.basename(x)
path = x.replace("\\", "/") # win32
module_list[name] = path
return module_list
def is_module(path):
return os.path.isdir(path) and os.path.exists(os.path.join(path, "SCsub"))
def write_modules(module_list):
includes_cpp = ""
preregister_cpp = ""
register_cpp = ""
unregister_cpp = ""
for name, path in module_list.items():
try:
with open(os.path.join(path, "register_types.h")):
includes_cpp += '#include "' + path + '/register_types.h"\n'
preregister_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
preregister_cpp += "#ifdef MODULE_" + name.upper() + "_HAS_PREREGISTER\n"
preregister_cpp += "\tpreregister_" + name + "_types();\n"
preregister_cpp += "#endif\n"
preregister_cpp += "#endif\n"
register_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
register_cpp += "\tregister_" + name + "_types();\n"
register_cpp += "#endif\n"
unregister_cpp += "#ifdef MODULE_" + name.upper() + "_ENABLED\n"
unregister_cpp += "\tunregister_" + name + "_types();\n"
unregister_cpp += "#endif\n"
except OSError:
pass
modules_cpp = """// register_module_types.gen.cpp
/* THIS FILE IS GENERATED DO NOT EDIT */
#include "register_module_types.h"
#include "modules/modules_enabled.gen.h"
%s
void preregister_module_types() {
%s
}
void register_module_types() {
%s
}
void unregister_module_types() {
%s
}
""" % (
includes_cpp,
preregister_cpp,
register_cpp,
unregister_cpp,
)
# NOTE: It is safe to generate this file here, since this is still executed serially
with open("modules/register_module_types.gen.cpp", "w") as f:
f.write(modules_cpp)
def convert_custom_modules_path(path):
if not path:
return path
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
err_msg = "Build option 'custom_modules' must %s"
if not os.path.isdir(path):
raise ValueError(err_msg % "point to an existing directory.")
if path == os.path.realpath("modules"):
raise ValueError(err_msg % "be a directory other than built-in `modules` directory.")
if is_module(path):
raise ValueError(err_msg % "point to a directory with modules, not a single module.")
return path
def disable_module(self):
self.disabled_modules.append(self.current_module)
def module_check_dependencies(self, module, dependencies):
"""
Checks if module dependencies are enabled for a given module,
and prints a warning if they aren't.
Meant to be used in module `can_build` methods.
Returns a boolean (True if dependencies are satisfied).
"""
missing_deps = []
for dep in dependencies:
opt = "module_{}_enabled".format(dep)
if opt not in self or not self[opt]:
missing_deps.append(dep)
if missing_deps != []:
print(
"Disabling '{}' module as the following dependencies are not satisfied: {}".format(
module, ", ".join(missing_deps)
)
)
return False
else:
return True
def use_windows_spawn_fix(self, platform=None):
if os.name != "nt":
return # not needed, only for windows
# On Windows, due to the limited command line length, when creating a static library
# from a very high number of objects SCons will invoke "ar" once per object file;
# that makes object files with the same name overwrite each other, so the last one wins
# and the library loses symbols defined by the overwritten objects.
# By enabling quick append instead of the default mode (replacing), libraries will
# get built correctly regardless of the invocation strategy.
# Furthermore, since SCons will rebuild the library from scratch when an object file
# changes, no multiple versions of the same object file will be present.
self.Replace(ARFLAGS="q")
def mySubProcess(cmdline, env):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(
cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
shell=False,
env=env,
)
_, err = proc.communicate()
rv = proc.wait()
if rv:
print("=====")
print(err)
print("=====")
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = " ".join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
env = {str(key): str(value) for key, value in iter(env.items())}
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
self["SPAWN"] = mySpawn
def save_active_platforms(apnames, ap):
for x in ap:
names = ["logo"]
if os.path.isfile(x + "/run_icon.png"):
names.append("run_icon")
for name in names:
pngf = open(x + "/" + name + ".png", "rb")
b = pngf.read(1)
str = " /* AUTOGENERATED FILE, DO NOT EDIT */ \n"
str += " static const unsigned char _" + x[9:] + "_" + name + "[]={"
while len(b) == 1:
str += hex(ord(b))
b = pngf.read(1)
if len(b) == 1:
str += ","
str += "};\n"
pngf.close()
# NOTE: It is safe to generate this file here, since this is still executed serially
wf = x + "/" + name + ".gen.h"
with open(wf, "w") as pngw:
pngw.write(text)
def no_verbose(sys, env):
colors = {}
# Colors are disabled in non-TTY environments such as pipes. This means
# that if output is redirected to a file, it will not contain color codes
if sys.stdout.isatty():
colors["cyan"] = "\033[96m"
colors["purple"] = "\033[95m"
colors["blue"] = "\033[94m"
colors["green"] = "\033[92m"
colors["yellow"] = "\033[93m"
colors["red"] = "\033[91m"
colors["end"] = "\033[0m"
else:
colors["cyan"] = ""
colors["purple"] = ""
colors["blue"] = ""
colors["green"] = ""
colors["yellow"] = ""
colors["red"] = ""
colors["end"] = ""
compile_source_message = "{}Compiling {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
java_compile_source_message = "{}Compiling {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
compile_shared_source_message = "{}Compiling shared {}==> {}$SOURCE{}".format(
colors["blue"], colors["purple"], colors["yellow"], colors["end"]
)
link_program_message = "{}Linking Program {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
link_library_message = "{}Linking Static Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
ranlib_library_message = "{}Ranlib Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
link_shared_library_message = "{}Linking Shared Library {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
java_library_message = "{}Creating Java Archive {}==> {}$TARGET{}".format(
colors["red"], colors["purple"], colors["yellow"], colors["end"]
)
env.Append(CXXCOMSTR=[compile_source_message])
env.Append(CCCOMSTR=[compile_source_message])
env.Append(SHCCCOMSTR=[compile_shared_source_message])
env.Append(SHCXXCOMSTR=[compile_shared_source_message])
env.Append(ARCOMSTR=[link_library_message])
env.Append(RANLIBCOMSTR=[ranlib_library_message])
env.Append(SHLINKCOMSTR=[link_shared_library_message])
env.Append(LINKCOMSTR=[link_program_message])
env.Append(JARCOMSTR=[java_library_message])
env.Append(JAVACCOMSTR=[java_compile_source_message])
def detect_visual_c_compiler_version(tools_env):
# tools_env is the variable SCons uses to call tools that execute tasks (SCons's env['ENV']);
# see the SCons documentation for more information on what it does.
# In order for this function to be well encapsulated, it expects SCons's tools env (env['ENV'])
# and not the SCons setup environment (env), so make sure you call it with the right environment
# or it will fail to detect the proper VC version that will be called.
# There is no flag to give to visual c compilers to set the architecture, ie scons bits argument (32,64,ARM etc)
# There are many different cl.exe files that are run, and each one compiles & links to a different architecture
# As far as I know, the only way to figure out what compiler will be run when Scons calls cl.exe via Program()
# is to check the PATH variable and figure out which one will be called first. Code below does that and returns:
# the following string values:
# "" Compiler not detected
# "amd64" Native 64 bit compiler
# "amd64_x86" 64 bit Cross Compiler for 32 bit
# "x86" Native 32 bit compiler
# "x86_amd64" 32 bit Cross Compiler for 64 bit
# There are other architectures, but Godot does not support them currently, so this function does not detect arm/amd64_arm
# and similar architectures/compilers
# Set chosen compiler to "not detected"
vc_chosen_compiler_index = -1
vc_chosen_compiler_str = ""
# Start with Pre VS 2017 checks which uses VCINSTALLDIR:
if "VCINSTALLDIR" in tools_env:
# print("Checking VCINSTALLDIR")
# find() works with -1 so big ifs below are needed... the simplest solution, in fact
# First test if amd64 and amd64_x86 compilers are present in the path
vc_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64;")
if vc_amd64_compiler_detection_index > -1:
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\amd64_x86;")
if vc_amd64_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
# Now check the 32 bit compilers
vc_x86_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN;")
if vc_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = tools_env["PATH"].find(tools_env["VCINSTALLDIR"] + "BIN\\x86_amd64;")
if vc_x86_amd64_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
# and for VS 2017 and newer we check VCTOOLSINSTALLDIR:
if "VCTOOLSINSTALLDIR" in tools_env:
# Newer versions have a different path available
vc_amd64_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX64\\X64;")
)
if vc_amd64_compiler_detection_index > -1:
vc_chosen_compiler_index = vc_amd64_compiler_detection_index
vc_chosen_compiler_str = "amd64"
vc_amd64_x86_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX64\\X86;")
)
if vc_amd64_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_amd64_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_amd64_x86_compiler_detection_index
vc_chosen_compiler_str = "amd64_x86"
vc_x86_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX86\\X86;")
)
if vc_x86_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_compiler_detection_index
vc_chosen_compiler_str = "x86"
vc_x86_amd64_compiler_detection_index = (
tools_env["PATH"].upper().find(tools_env["VCTOOLSINSTALLDIR"].upper() + "BIN\\HOSTX86\\X64;")
)
if vc_x86_amd64_compiler_detection_index > -1 and (
vc_chosen_compiler_index == -1 or vc_chosen_compiler_index > vc_x86_amd64_compiler_detection_index
):
vc_chosen_compiler_index = vc_x86_amd64_compiler_detection_index
vc_chosen_compiler_str = "x86_amd64"
return vc_chosen_compiler_str
def find_visual_c_batch_file(env):
from SCons.Tool.MSCommon.vc import get_default_version, get_host_target, find_batch_file
version = get_default_version(env)
(host_platform, target_platform, _) = get_host_target(env)
return find_batch_file(env, version, host_platform, target_platform)[0]
def generate_cpp_hint_file(filename):
if os.path.isfile(filename):
# Don't overwrite an existing hint file since the user may have customized it.
pass
else:
try:
with open(filename, "w") as fd:
fd.write("#define GDCLASS(m_class, m_inherits)\n")
except OSError:
print("Could not write cpp.hint file.")
def glob_recursive(pattern, node="."):
results = []
for f in Glob(str(node) + "/*", source=True):
if type(f) is Node.FS.Dir:
results += glob_recursive(pattern, f)
results += Glob(str(node) + "/" + pattern, source=True)
return results
def add_to_vs_project(env, sources):
for x in sources:
if type(x) == type(""):
fname = env.File(x).path
else:
fname = env.File(x)[0].path
pieces = fname.split(".")
if len(pieces) > 0:
basename = pieces[0]
basename = basename.replace("\\\\", "/")
if os.path.isfile(basename + ".h"):
env.vs_incs += [basename + ".h"]
elif os.path.isfile(basename + ".hpp"):
env.vs_incs += [basename + ".hpp"]
if os.path.isfile(basename + ".c"):
env.vs_srcs += [basename + ".c"]
elif os.path.isfile(basename + ".cpp"):
env.vs_srcs += [basename + ".cpp"]
def generate_vs_project(env, num_jobs):
batch_file = find_visual_c_batch_file(env)
if batch_file:
def build_commandline(commands):
common_build_prefix = [
'cmd /V /C set "plat=$(PlatformTarget)"',
'(if "$(PlatformTarget)"=="x64" (set "plat=x86_amd64"))',
'set "tools=%s"' % env["tools"],
'(if "$(Configuration)"=="release" (set "tools=no"))',
'call "' + batch_file + '" !plat!',
]
# windows allows us to have spaces in paths, so we need
# to double quote off the directory. However, the path ends
# in a backslash, so we need to remove this, lest it escape the
# last double quote off, confusing MSBuild
common_build_postfix = [
"--directory=\"$(ProjectDir.TrimEnd('\\'))\"",
"platform=windows",
"target=$(Configuration)",
"progress=no",
"tools=!tools!",
"-j%s" % num_jobs,
]
if env["custom_modules"]:
common_build_postfix.append("custom_modules=%s" % env["custom_modules"])
result = " ^& ".join(common_build_prefix + [" ".join([commands] + common_build_postfix)])
return result
add_to_vs_project(env, env.core_sources)
add_to_vs_project(env, env.drivers_sources)
add_to_vs_project(env, env.main_sources)
add_to_vs_project(env, env.modules_sources)
add_to_vs_project(env, env.scene_sources)
add_to_vs_project(env, env.servers_sources)
add_to_vs_project(env, env.editor_sources)
for header in glob_recursive("**/*.h"):
env.vs_incs.append(str(header))
env["MSVSBUILDCOM"] = build_commandline("scons")
env["MSVSREBUILDCOM"] = build_commandline("scons vsproj=yes")
env["MSVSCLEANCOM"] = build_commandline("scons --clean")
# This version information (Win32, x64, Debug, Release, Release_Debug) seems to be
# required for Visual Studio to understand that it needs to generate an NMAKE
# project. Do not modify without knowing what you are doing.
debug_variants = ["debug|Win32"] + ["debug|x64"]
release_variants = ["release|Win32"] + ["release|x64"]
release_debug_variants = ["release_debug|Win32"] + ["release_debug|x64"]
variants = debug_variants + release_variants + release_debug_variants
debug_targets = ["bin\\godot.windows.tools.32.exe"] + ["bin\\godot.windows.tools.64.exe"]
release_targets = ["bin\\godot.windows.opt.32.exe"] + ["bin\\godot.windows.opt.64.exe"]
release_debug_targets = ["bin\\godot.windows.opt.tools.32.exe"] + ["bin\\godot.windows.opt.tools.64.exe"]
targets = debug_targets + release_targets + release_debug_targets
if not env.get("MSVS"):
env["MSVS"]["PROJECTSUFFIX"] = ".vcxproj"
env["MSVS"]["SOLUTIONSUFFIX"] = ".sln"
env.MSVSProject(
target=["#godot" + env["MSVSPROJECTSUFFIX"]],
incs=env.vs_incs,
srcs=env.vs_srcs,
runfile=targets,
buildtarget=targets,
auto_build_solution=1,
variant=variants,
)
else:
print("Could not locate Visual Studio batch file to set up the build environment. Not generating VS project.")
def precious_program(env, program, sources, **args):
program = env.ProgramOriginal(program, sources, **args)
env.Precious(program)
return program
def add_shared_library(env, name, sources, **args):
library = env.SharedLibrary(name, sources, **args)
env.NoCache(library)
return library
def add_library(env, name, sources, **args):
library = env.Library(name, sources, **args)
env.NoCache(library)
return library
def add_program(env, name, sources, **args):
program = env.Program(name, sources, **args)
env.NoCache(program)
return program
def CommandNoCache(env, target, sources, command, **args):
result = env.Command(target, sources, command, **args)
env.NoCache(result)
return result
def Run(env, function, short_message, subprocess=True):
output_print = short_message if not env["verbose"] else ""
if not subprocess:
return Action(function, output_print)
else:
return Action(run_in_subprocess(function), output_print)
def detect_darwin_sdk_path(platform, env):
sdk_name = ""
if platform == "osx":
sdk_name = "macosx"
var_name = "MACOS_SDK_PATH"
elif platform == "iphone":
sdk_name = "iphoneos"
var_name = "IPHONESDK"
elif platform == "iphonesimulator":
sdk_name = "iphonesimulator"
var_name = "IPHONESDK"
else:
raise Exception("Invalid platform argument passed to detect_darwin_sdk_path")
if not env[var_name]:
try:
sdk_path = subprocess.check_output(["xcrun", "--sdk", sdk_name, "--show-sdk-path"]).strip().decode("utf-8")
if sdk_path:
env[var_name] = sdk_path
except (subprocess.CalledProcessError, OSError):
print("Failed to find SDK path while running xcrun --sdk {} --show-sdk-path.".format(sdk_name))
raise
def is_vanilla_clang(env):
if not using_clang(env):
return False
try:
version = subprocess.check_output([env.subst(env["CXX"]), "--version"]).strip().decode("utf-8")
except (subprocess.CalledProcessError, OSError):
print("Couldn't parse CXX environment variable to infer compiler version.")
return False
return not version.startswith("Apple")
def get_compiler_version(env):
"""
Returns an array of version numbers as ints: [major, minor, patch].
The return array should have at least two values (major, minor).
"""
if not env.msvc:
# Not using -dumpversion as some GCC distros only return major, and
# Clang used to return hardcoded 4.2.1; see https://reviews.llvm.org/D56803
try:
version = subprocess.check_output([env.subst(env["CXX"]), "--version"]).strip().decode("utf-8")
except (subprocess.CalledProcessError, OSError):
print("Couldn't parse CXX environment variable to infer compiler version.")
return None
else: # TODO: Implement for MSVC
return None
match = re.search("[0-9]+\.[0-9.]+", version)
if match is not None:
return list(map(int, match.group().split(".")))
else:
return None
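# Illustrative: for output like "clang version 12.0.1" the regex above
# yields [12, 0, 1]; for "gcc (GCC) 11.2.0" it yields [11, 2, 0].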
def using_gcc(env):
return "gcc" in os.path.basename(env["CC"])
def using_clang(env):
return "clang" in os.path.basename(env["CC"])
def show_progress(env):
import sys
from SCons.Script import Progress, Command, AlwaysBuild
screen = sys.stdout
# Progress reporting is not available in non-TTY environments since it
# messes with the output (for example, when writing to a file)
show_progress = env["progress"] and sys.stdout.isatty()
node_count = 0
node_count_max = 0
node_count_interval = 1
node_count_fname = str(env.Dir("#")) + "/.scons_node_count"
import time, math
class cache_progress:
# The default is 1 GB cache and 12 hours half life
def __init__(self, path=None, limit=1073741824, half_life=43200):
self.path = path
self.limit = limit
self.exponent_scale = math.log(2) / half_life
if env["verbose"] and path != None:
screen.write(
"Current cache limit is {} (used: {})\n".format(
self.convert_size(limit), self.convert_size(self.get_size(path))
)
)
self.delete(self.file_list())
def __call__(self, node, *args, **kw):
nonlocal node_count, node_count_max, node_count_interval, node_count_fname, show_progress
if show_progress:
# Print the progress percentage
node_count += node_count_interval
if node_count_max > 0 and node_count <= node_count_max:
screen.write("\r[%3d%%] " % (node_count * 100 / node_count_max))
screen.flush()
elif node_count_max > 0 and node_count > node_count_max:
screen.write("\r[100%] ")
screen.flush()
else:
screen.write("\r[Initial build] ")
screen.flush()
def delete(self, files):
if len(files) == 0:
return
if env["verbose"]:
# Utter something
screen.write("\rPurging %d %s from cache...\n" % (len(files), len(files) > 1 and "files" or "file"))
[os.remove(f) for f in files]
def file_list(self):
if self.path is None:
# Nothing to do
return []
# Gather a list of (filename, (size, atime)) within the
# cache directory
file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, "*", "*"))]
if file_stat == []:
# Nothing to do
return []
# Gather (filename, size, age since last access) for each cache
# entry; size is assumed to be roughly proportional to the
# recompilation time, and age determines which entries to keep.
current_time = time.time()
file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
# Sort by the most recently accessed files (most sensible to keep) first
file_stat.sort(key=lambda x: x[2])
# Search for the first entry where the storage limit is
# reached
sum, mark = 0, None
for i, x in enumerate(file_stat):
sum += x[1]
if sum > self.limit:
mark = i
break
if mark is None:
return []
else:
return [x[0] for x in file_stat[mark:]]
def convert_size(self, size_bytes):
if size_bytes == 0:
return "0 bytes"
size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (int(s) if i == 0 else s, size_name[i])
def get_size(self, start_path="."):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def progress_finish(target, source, env):
nonlocal node_count, progressor
with open(node_count_fname, "w") as f:
f.write("%d\n" % node_count)
progressor.delete(progressor.file_list())
try:
with open(node_count_fname) as f:
node_count_max = int(f.readline())
except (OSError, ValueError):
pass
cache_directory = os.environ.get("SCONS_CACHE")
# Simple cache pruning, attached to SCons' progress callback. Trim the
# cache directory to a size not larger than cache_limit.
cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
progressor = cache_progress(cache_directory, cache_limit)
Progress(progressor, interval=node_count_interval)
progress_finish_command = Command("progress_finish", [], progress_finish)
AlwaysBuild(progress_finish_command)
def dump(env):
# Dumps latest build information for debugging purposes and external tools.
from json import dump
def non_serializable(obj):
return "<<non-serializable: %s>>" % (type(obj).__qualname__)
with open(".scons_env.json", "w") as f:
dump(env.Dictionary(), f, indent=4, default=non_serializable)
|
[] |
[] |
[
"SCONS_CACHE",
"SCONS_CACHE_LIMIT",
"BUILD_NAME"
] |
[]
|
["SCONS_CACHE", "SCONS_CACHE_LIMIT", "BUILD_NAME"]
|
python
| 3 | 0 | |
appveyor_keep_builds.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Ingo Breßler ([email protected])
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
#
# Using the AppVeyor REST API to purge old builds.
import sys, os
import argparse
from requests import get, delete # for HTTP requests
from dateutil.parser import parse as dateutil_parse
from operator import itemgetter
from helpers import makeRequest, assertEnvVarExists, jsonPrettyPrint
parser = argparse.ArgumentParser(description='Remove old AppVeyor builds.')
parser.add_argument('buildsToKeep', metavar='N', type=int, default=20,
help='The number of builds to keep.')
parser.add_argument('--verbose', const=True, default=False, action='store_const',
help='Give more output for debugging')
args = parser.parse_args()
requiredEnvVars = ('APPVEYOR_ACCOUNT_NAME', 'APPVEYOR_PROJECT_SLUG', 'APPVEYOR_TOKEN')
for vname in requiredEnvVars:
assertEnvVarExists(vname)
baseurl = f"https://ci.appveyor.com/api"
accountName = os.environ['APPVEYOR_ACCOUNT_NAME']
projectSlug = os.environ['APPVEYOR_PROJECT_SLUG']
authHead = {"Authorization": "Bearer " + os.environ['APPVEYOR_TOKEN']}
response, code = makeRequest(get, baseurl, f"/projects/{accountName}/{projectSlug}/history",
headers=authHead, verbose=args.verbose,
params=dict(recordsNumber=100))
if code != 200:
print("Could not get build history! Stopping.")
sys.exit(1)
#jsonPrettyPrint(response) # for debugging
builds = [(build['buildId'], dateutil_parse(build['finished']))
for build in response['builds'] if 'finished' in build]
builds.sort(key=itemgetter(-1))
print(f"Found {len(builds)} builds.")
for buildId, finished in builds[:-args.buildsToKeep]: # keep the newest <buildsToKeep>
print(f"Deleting buildId {buildId} finished at {finished.isoformat()}:", end=" ")
response, code = makeRequest(delete, baseurl, f"/account/{accountName}/builds/{buildId}",
headers=authHead, verbose=args.verbose)
print("OK" if code == 204 else f"{code} != 204!")
# vim: set ts=4 sw=4 sts=4 tw=0 et:
|
[] |
[] |
[
"APPVEYOR_PROJECT_SLUG",
"APPVEYOR_ACCOUNT_NAME",
"APPVEYOR_TOKEN"
] |
[]
|
["APPVEYOR_PROJECT_SLUG", "APPVEYOR_ACCOUNT_NAME", "APPVEYOR_TOKEN"]
|
python
| 3 | 0 | |
defaults/defaults.go
|
// DBDeployer - The MySQL Sandbox
// Copyright © 2006-2018 Giuseppe Maxia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package defaults
import (
"encoding/json"
"fmt"
"github.com/datacharmer/dbdeployer/common"
"os"
"strconv"
"strings"
"time"
)
type DbdeployerDefaults struct {
Version string `json:"version"`
SandboxHome string `json:"sandbox-home"`
SandboxBinary string `json:"sandbox-binary"`
UseSandboxCatalog bool `json:"use-sandbox-catalog"`
//UseConcurrency bool `json:"use-concurrency"`
MasterSlaveBasePort int `json:"master-slave-base-port"`
GroupReplicationBasePort int `json:"group-replication-base-port"`
GroupReplicationSpBasePort int `json:"group-replication-sp-base-port"`
FanInReplicationBasePort int `json:"fan-in-replication-base-port"`
AllMastersReplicationBasePort int `json:"all-masters-replication-base-port"`
MultipleBasePort int `json:"multiple-base-port"`
// GaleraBasePort int `json:"galera-base-port"`
// PXCBasePort int `json:"pxc-base-port"`
// NdbBasePort int `json:"ndb-base-port"`
GroupPortDelta int `json:"group-port-delta"`
MysqlXPortDelta int `json:"mysqlx-port-delta"`
MasterName string `json:"master-name"`
MasterAbbr string `json:"master-abbr"`
NodePrefix string `json:"node-prefix"`
SlavePrefix string `json:"slave-prefix"`
SlaveAbbr string `json:"slave-abbr"`
SandboxPrefix string `json:"sandbox-prefix"`
MasterSlavePrefix string `json:"master-slave-prefix"`
GroupPrefix string `json:"group-prefix"`
GroupSpPrefix string `json:"group-sp-prefix"`
MultiplePrefix string `json:"multiple-prefix"`
FanInPrefix string `json:"fan-in-prefix"`
AllMastersPrefix string `json:"all-masters-prefix"`
ReservedPorts []int `json:"reserved-ports"`
// GaleraPrefix string `json:"galera-prefix"`
// PxcPrefix string `json:"pxc-prefix"`
// NdbPrefix string `json:"ndb-prefix"`
Timestamp string `json:"timestamp"`
}
const (
min_port_value int = 11000
max_port_value int = 30000
LineLength int = 80
)
var (
home_dir string = os.Getenv("HOME")
ConfigurationDir string = home_dir + "/.dbdeployer"
ConfigurationFile string = ConfigurationDir + "/config.json"
CustomConfigurationFile string = ""
SandboxRegistry string = ConfigurationDir + "/sandboxes.json"
SandboxRegistryLock string = ConfigurationDir + "/sandboxes.lock"
StarLine string = strings.Repeat("*", LineLength)
DashLine string = strings.Repeat("-", LineLength)
HashLine string = strings.Repeat("#", LineLength)
// This variable is changed to true when the "cmd" package is activated,
// meaning that we're using the command line interface of dbdeployer.
// It is used to make decisions whether to write messages to the screen
// when calling sandbox creation functions from other apps.
UsingDbDeployer bool = false
factoryDefaults = DbdeployerDefaults{
Version: common.CompatibleVersion,
SandboxHome: home_dir + "/sandboxes",
SandboxBinary: home_dir + "/opt/mysql",
UseSandboxCatalog: true,
//UseConcurrency : true,
MasterSlaveBasePort: 11000,
GroupReplicationBasePort: 12000,
GroupReplicationSpBasePort: 13000,
FanInReplicationBasePort: 14000,
AllMastersReplicationBasePort: 15000,
MultipleBasePort: 16000,
// GaleraBasePort: 17000,
// PxcBasePort: 18000,
// NdbBasePort: 19000,
GroupPortDelta: 125,
MysqlXPortDelta: 10000,
MasterName: "master",
MasterAbbr: "m",
NodePrefix: "node",
SlavePrefix: "slave",
SlaveAbbr: "s",
SandboxPrefix: "msb_",
MasterSlavePrefix: "rsandbox_",
GroupPrefix: "group_msb_",
GroupSpPrefix: "group_sp_msb_",
MultiplePrefix: "multi_msb_",
FanInPrefix: "fan_in_msb_",
AllMastersPrefix: "all_masters_msb_",
ReservedPorts: []int{1186, 3306, 33060},
// GaleraPrefix: "galera_msb_",
// NdbPrefix: "ndb_msb_",
// PxcPrefix: "pxc_msb_",
Timestamp: time.Now().Format(time.UnixDate),
}
currentDefaults DbdeployerDefaults
)
func Defaults() DbdeployerDefaults {
if currentDefaults.Version == "" {
if common.FileExists(ConfigurationFile) {
currentDefaults = ReadDefaultsFile(ConfigurationFile)
} else {
currentDefaults = factoryDefaults
}
}
return currentDefaults
}
func ShowDefaults(defaults DbdeployerDefaults) {
defaults = replace_literal_env_values(defaults)
if common.FileExists(ConfigurationFile) {
fmt.Printf("# Configuration file: %s\n", ConfigurationFile)
} else {
fmt.Println("# Internal values:")
}
b, err := json.MarshalIndent(defaults, " ", "\t")
if err != nil {
common.Exit(1, fmt.Sprintf("error encoding defaults: %s", err))
}
fmt.Printf("%s\n", b)
}
func WriteDefaultsFile(filename string, defaults DbdeployerDefaults) {
defaults = replace_literal_env_values(defaults)
defaults_dir := common.DirName(filename)
if !common.DirExists(defaults_dir) {
common.Mkdir(defaults_dir)
}
b, err := json.MarshalIndent(defaults, " ", "\t")
if err != nil {
common.Exit(1, fmt.Sprintf("error encoding defaults: %s", err))
}
json_string := fmt.Sprintf("%s", b)
common.WriteString(json_string, filename)
}
func expand_environment_variables(defaults DbdeployerDefaults) DbdeployerDefaults {
defaults.SandboxHome = common.ReplaceEnvVar(defaults.SandboxHome, "HOME")
defaults.SandboxHome = common.ReplaceEnvVar(defaults.SandboxHome, "PWD")
defaults.SandboxBinary = common.ReplaceEnvVar(defaults.SandboxBinary, "HOME")
defaults.SandboxBinary = common.ReplaceEnvVar(defaults.SandboxBinary, "PWD")
return defaults
}
func replace_literal_env_values(defaults DbdeployerDefaults) DbdeployerDefaults {
defaults.SandboxHome = common.ReplaceLiteralEnvVar(defaults.SandboxHome, "HOME")
defaults.SandboxHome = common.ReplaceLiteralEnvVar(defaults.SandboxHome, "PWD")
defaults.SandboxBinary = common.ReplaceLiteralEnvVar(defaults.SandboxBinary, "HOME")
defaults.SandboxBinary = common.ReplaceLiteralEnvVar(defaults.SandboxBinary, "PWD")
return defaults
}
func ReadDefaultsFile(filename string) (defaults DbdeployerDefaults) {
defaults_blob := common.SlurpAsBytes(filename)
err := json.Unmarshal(defaults_blob, &defaults)
if err != nil {
common.Exit(1, fmt.Sprintf("error decoding defaults: %s", err))
}
defaults = expand_environment_variables(defaults)
return
}
func check_int(name string, val, min, max int) bool {
if val >= min && val <= max {
return true
}
fmt.Printf("Value %s (%d) must be between %d and %d\n", name, val, min, max)
return false
}
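// e.g. check_int("group-port-delta", 125, 101, 299) returns true;
// an out-of-range value prints the message above and returns false.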
func ValidateDefaults(nd DbdeployerDefaults) bool {
var all_ints bool
all_ints = check_int("master-slave-base-port", nd.MasterSlaveBasePort, min_port_value, max_port_value) &&
check_int("group-replication-base-port", nd.GroupReplicationBasePort, min_port_value, max_port_value) &&
check_int("group-replication-sp-base-port", nd.GroupReplicationSpBasePort, min_port_value, max_port_value) &&
check_int("multiple-base-port", nd.MultipleBasePort, min_port_value, max_port_value) &&
check_int("fan-in-base-port", nd.FanInReplicationBasePort, min_port_value, max_port_value) &&
check_int("all-masters-base-port", nd.AllMastersReplicationBasePort, min_port_value, max_port_value) &&
// check_int("galera-base-port", nd.GaleraBasePort, min_port_value, max_port_value) &&
// check_int("pxc-base-port", nd.PxcBasePort, min_port_value, max_port_value) &&
// check_int("ndb-base-port", nd.NdbBasePort, min_port_value, max_port_value) &&
check_int("group-port-delta", nd.GroupPortDelta, 101, 299)
check_int("mysqlx-port-delta", nd.MysqlXPortDelta, 2000, 15000)
if !all_ints {
return false
}
var no_conflicts bool
no_conflicts = nd.MultipleBasePort != nd.GroupReplicationSpBasePort &&
nd.MultipleBasePort != nd.GroupReplicationBasePort &&
nd.MultipleBasePort != nd.MasterSlaveBasePort &&
nd.MultipleBasePort != nd.FanInReplicationBasePort &&
nd.MultipleBasePort != nd.AllMastersReplicationBasePort &&
// nd.MultipleBasePort != nd.NdbBasePort &&
// nd.MultipleBasePort != nd.GaleraBasePort &&
// nd.MultipleBasePort != nd.PxcBasePort &&
nd.MultiplePrefix != nd.GroupSpPrefix &&
nd.MultiplePrefix != nd.GroupPrefix &&
nd.MultiplePrefix != nd.MasterSlavePrefix &&
nd.MultiplePrefix != nd.SandboxPrefix &&
nd.MultiplePrefix != nd.FanInPrefix &&
nd.MultiplePrefix != nd.AllMastersPrefix &&
nd.MasterAbbr != nd.SlaveAbbr &&
// nd.MultiplePrefix != nd.NdbPrefix &&
// nd.MultiplePrefix != nd.GaleraPrefix &&
// nd.MultiplePrefix != nd.PxcPrefix &&
nd.SandboxHome != nd.SandboxBinary
if !no_conflicts {
fmt.Printf("Conflicts found in defaults values:\n")
ShowDefaults(nd)
return false
}
all_strings := nd.SandboxPrefix != "" &&
nd.MasterSlavePrefix != "" &&
nd.MasterName != "" &&
nd.MasterAbbr != "" &&
nd.NodePrefix != "" &&
nd.SlavePrefix != "" &&
nd.SlaveAbbr != "" &&
nd.GroupPrefix != "" &&
nd.GroupSpPrefix != "" &&
nd.MultiplePrefix != "" &&
nd.SandboxHome != "" &&
nd.SandboxBinary != ""
if !all_strings {
fmt.Printf("One or more empty values found in defaults\n")
ShowDefaults(nd)
return false
}
versionList := common.VersionToList(common.CompatibleVersion)
if !common.GreaterOrEqualVersion(nd.Version, versionList) {
fmt.Printf("Provided defaults are for version %s. Current version is %s\n", nd.Version, common.CompatibleVersion)
return false
}
return true
}
func RemoveDefaultsFile() {
if common.FileExists(ConfigurationFile) {
err := os.Remove(ConfigurationFile)
if err != nil {
common.Exit(1, fmt.Sprintf("%s", err))
}
fmt.Printf("#File %s removed\n", ConfigurationFile)
} else {
common.Exit(1, fmt.Sprintf("Configuration file %s not found", ConfigurationFile))
}
}
func a_to_i(val string) int {
numvalue, err := strconv.Atoi(val)
if err != nil {
common.Exit(1, fmt.Sprintf("Not a valid number: %s", val))
}
return numvalue
}
func UpdateDefaults(label, value string, store_defaults bool) {
new_defaults := Defaults()
switch label {
case "version":
new_defaults.Version = value
case "sandbox-home":
new_defaults.SandboxHome = value
case "sandbox-binary":
new_defaults.SandboxBinary = value
case "use-sandbox-catalog":
new_defaults.UseSandboxCatalog = common.TextToBool(value)
//case "use-concurrency":
// new_defaults.UseConcurrency = common.TextToBool(value)
case "master-slave-base-port":
new_defaults.MasterSlaveBasePort = a_to_i(value)
case "group-replication-base-port":
new_defaults.GroupReplicationBasePort = a_to_i(value)
case "group-replication-sp-base-port":
new_defaults.GroupReplicationSpBasePort = a_to_i(value)
case "multiple-base-port":
new_defaults.MultipleBasePort = a_to_i(value)
case "fan-in-base-port":
new_defaults.FanInReplicationBasePort = a_to_i(value)
case "all-masters-base-port":
new_defaults.AllMastersReplicationBasePort = a_to_i(value)
// case "ndb-base-port":
// new_defaults.NdbBasePort = a_to_i(value)
// case "galera-base-port":
// new_defaults.GaleraBasePort = a_to_i(value)
// case "pxc-base-port":
// new_defaults.PxcBasePort = a_to_i(value)
case "group-port-delta":
new_defaults.GroupPortDelta = a_to_i(value)
case "mysqlx-port-delta":
new_defaults.MysqlXPortDelta = a_to_i(value)
case "master-name":
new_defaults.MasterName = value
case "master-abbr":
new_defaults.MasterAbbr = value
case "node-prefix":
new_defaults.NodePrefix = value
case "slave-prefix":
new_defaults.SlavePrefix = value
case "slave-abbr":
new_defaults.SlaveAbbr = value
case "sandbox-prefix":
new_defaults.SandboxPrefix = value
case "master-slave-prefix":
new_defaults.MasterSlavePrefix = value
case "group-prefix":
new_defaults.GroupPrefix = value
case "group-sp-prefix":
new_defaults.GroupSpPrefix = value
case "multiple-prefix":
new_defaults.MultiplePrefix = value
case "fan-in-prefix":
new_defaults.FanInPrefix = value
case "all-masters-prefix":
new_defaults.AllMastersPrefix = value
case "reserved-ports":
new_defaults.ReservedPorts = common.StringToIntSlice(value)
// case "galera-prefix":
// new_defaults.GaleraPrefix = value
// case "pxc-prefix":
// new_defaults.PxcPrefix = value
// case "ndb-prefix":
// new_defaults.NdbPrefix = value
default:
common.Exit(1, fmt.Sprintf("Unrecognized label %s", label))
}
if ValidateDefaults(new_defaults) {
currentDefaults = new_defaults
if store_defaults {
WriteDefaultsFile(ConfigurationFile, Defaults())
fmt.Printf("# Updated %s -> \"%s\"\n", label, value)
}
} else {
common.Exit(1, fmt.Sprintf("Invalid defaults data %s : %s", label, value))
}
}
func LoadConfiguration() {
if !common.FileExists(ConfigurationFile) {
// WriteDefaultsFile(ConfigurationFile, Defaults())
return
}
new_defaults := ReadDefaultsFile(ConfigurationFile)
if ValidateDefaults(new_defaults) {
currentDefaults = new_defaults
} else {
fmt.Println(StarLine)
fmt.Printf("Defaults file %s not validated.\n", ConfigurationFile)
fmt.Println("Loading internal defaults")
fmt.Println(StarLine)
fmt.Println("")
time.Sleep(1000 * time.Millisecond)
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
test/e2e/storage/pmem_csi.go
|
/*
Copyright 2019 Intel Corporation.
SPDX-License-Identifier: Apache-2.0
*/
package storage
import (
"context"
"fmt"
"os"
"os/exec"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/intel/pmem-csi/test/e2e/deploy"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
var _ = deploy.DescribeForAll("Deployment", func(d *deploy.Deployment) {
f := framework.NewDefaultFramework("pmem")
// This checks that cluster-driver-registrar added the
// CSIDriverInfo for pmem-csi at some point in the past. A
// full test must include resetting the cluster and installing
// pmem-csi.
It("has CSIDriverInfo", func() {
_, err := f.ClientSet.StorageV1beta1().CSIDrivers().Get(context.Background(), "pmem-csi.intel.com", metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "get csidriver.storage.k8s.io for pmem-csi failed")
})
// This checks that temporarily running the driver pod on the master node
// creates an entry in CSINode and removes it again.
It("has CSINode", func() {
ctx := context.Background()
masterNode, err := findMasterNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
addLabel := `{"metadata":{"labels":{"feature.node.kubernetes.io/memory-nv.dax": "true"}}}`
removeLabel := `{"metadata":{"labels":{"feature.node.kubernetes.io/memory-nv.dax": null}}}`
patchMasterNode := func(patch string) (*v1.Node, error) {
return f.ClientSet.CoreV1().Nodes().Patch(ctx, masterNode, k8stypes.MergePatchType, []byte(patch), metav1.PatchOptions{}, "")
}
getMasterCSINode := func() (*storagev1.CSINode, error) {
return f.ClientSet.StorageV1().CSINodes().Get(ctx, masterNode, metav1.GetOptions{})
}
// Whatever we do, always remove the label which might
// have caused the PMEM-CSI driver to run on the master
// node. None of the other tests expect that.
defer func() {
By("reverting labels")
if _, err := patchMasterNode(removeLabel); err != nil {
framework.Logf("removing labels failed: %v", err)
}
By("destroying namespace again")
sshcmd := fmt.Sprintf("%s/_work/%s/ssh.0", os.Getenv("REPO_ROOT"), os.Getenv("CLUSTER"))
cmd := exec.Command(sshcmd, "sudo ndctl destroy-namespace --force all")
out, err := cmd.CombinedOutput()
if err != nil {
framework.Logf("erasing namespaces with %+v failed: %s", cmd, string(out))
}
}()
_, err = patchMasterNode(addLabel)
framework.ExpectNoError(err, "set label with %q", addLabel)
Eventually(getMasterCSINode, "5m", "10s").Should(driverRunning{d.DriverName})
_, err = patchMasterNode(removeLabel)
framework.ExpectNoError(err, "remove label with %q", removeLabel)
Eventually(getMasterCSINode, "2m", "10s").ShouldNot(driverRunning{d.DriverName})
})
})
type driverRunning struct {
driverName string
}
var _ types.GomegaMatcher = driverRunning{}
func (d driverRunning) Match(actual interface{}) (success bool, err error) {
csiNode := actual.(*storagev1.CSINode)
for _, driver := range csiNode.Spec.Drivers {
if driver.Name == d.driverName {
return true, nil
}
}
return false, nil
}
func (d driverRunning) FailureMessage(actual interface{}) (message string) {
csiNode := actual.(*storagev1.CSINode)
return fmt.Sprintf("driver %s is not in %+v", d.driverName, *csiNode)
}
func (d driverRunning) NegatedFailureMessage(actual interface{}) (message string) {
csiNode := actual.(*storagev1.CSINode)
return fmt.Sprintf("driver %s is in %+v", d.driverName, *csiNode)
}
|
[
"\"REPO_ROOT\"",
"\"CLUSTER\""
] |
[] |
[
"CLUSTER",
"REPO_ROOT"
] |
[]
|
["CLUSTER", "REPO_ROOT"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sharinator.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/quill.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import markdown
import codecs
import ConfigParser
import readers.MarkdownReader as MarkdownReader
import containers.PostData as PostData
import generators.BlogPostGenerator as BlogPostGenerator
import os
class BlogMetadata():
def __init__(self):
self.blogName = unicode()
self.blogDescription = unicode()
self.blogURL = unicode()
self.blogTheme = unicode()
self.blogAuthor = unicode()
self.postsFolder = unicode()
self.draftsFolder = unicode()
self.themesFolder = unicode()
self.imagesFolder = unicode()
self.blogFolder = unicode()
self.displayAboutMe = unicode()
self.postsPerPage = unicode()
self.completeFeed = unicode()
self.comments = unicode()
self.disqusCode = unicode()
self.analyticsCode = unicode()
self.tagName = unicode()
self.tagHeader = unicode()
self.aboutHeader = unicode()
self.newerPosts = unicode()
self.olderPosts = unicode()
self.page = unicode()
self.of = unicode()
''' Reads quill.cfg file to load blog settings '''
def loadConfig(self,filename):
config = ConfigParser.RawConfigParser()
config.readfp(codecs.open(filename,'r','utf-8'))
self.blogName = config.get("Basic", "BlogName")
self.blogDescription = config.get("Basic", "BlogDescription")
self.blogURL = config.get("Basic", "BlogURL")
self.blogTheme = config.get("Basic", "Theme")
self.blogAuthor = config.get("Basic", "BlogAuthor")
self.postsFolder = config.get("Folders", "PostsFolder")
self.draftsFolder = config.get("Folders", "DraftsFolder")
self.themesFolder = config.get("Folders", "ThemesFolder")
self.imagesFolder = config.get("Folders", "ImgsFolder")
self.blogFolder = config.get("Folders", "BlogFolder")
self.displayAboutMe = config.get("BlogContent", "AboutMe")
self.postsPerPage = config.get("BlogContent", "PostsPerPage")
self.completeFeed = config.get("BlogContent", "CompleteFeed")
self.comments = config.get("BlogContent", "Comments")
self.analytics = config.get("BlogContent", "Analytics")
self.tagName = config.get("Misc", "TagName")
self.tagHeader = config.get("Misc", "TagHeader")
self.aboutHeader = config.get("Misc", "AboutHeader")
self.newerPosts = config.get("Misc", "NewerPosts")
self.olderPosts = config.get("Misc", "OlderPosts")
self.page = config.get("Misc", "Page")
self.of = config.get("Misc", "Of")
self.usePygments = config.get("SyntaxHighlighting", "UsePygments")
self.pygmentsStyle = config.get("SyntaxHighlighting", "PygmentsStyle")
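# Illustrative quill.cfg sketch matching the keys read above
# (abridged; all values are made up):
#
# [Basic]
# BlogName = Example Blog
# [Folders]
# PostsFolder = posts
# [BlogContent]
# Comments = yes
# [Misc]
# TagName = Tags
# [SyntaxHighlighting]
# UsePygments = yes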
def main():
# Variables
postList = list()
postDataList = list()
# 0. Display program and version
print "quill - v0.1a"
print
# 1. Read config file to load the metadata
print "Reading config file...",
blogSettings = BlogMetadata()
blogSettings.loadConfig("quill.cfg")
print "[OK]"
# 1.1. If comments are enabled, load Disqus string to add it to the posts
print "Comments?",
if blogSettings.comments.lower() == "yes":
print "Yes, loading Disqus.txt"
disqusFile = open("disqus.txt","r")
blogSettings.disqusCode = disqusFile.read()
else:
print "No, skipping..."
# 1.2. If analytics are enabled, load analytics file to add it to the posts
print "Analytics?",
if blogSettings.analytics.lower() == "yes":
print "Yes, loading analytics"
analyticsFile = open("analytics.txt","r")
blogSettings.analyticsCode = analyticsFile.read()
else:
print "No, skipping..."
# 2. Analyse postsFolder and search *.md files to process
print "Processing posts...",
# 2.1 Generate list of files to process
for root, dirs, files in os.walk(blogSettings.postsFolder):
for file in files:
if file.endswith(".md"):
postList.append(os.path.join(root, file))
print "[OK]"
# 3. Process *.md files to generate PostData
print "Generating post data...",
reader = MarkdownReader.MarkdownReader(blogSettings.postsFolder)
if blogSettings.displayAboutMe.lower() == "yes":
for post in postList:
if post.lower().endswith("about.md"):
aboutPost = reader.readNoMetadata(post)
aboutPost.title = blogSettings.aboutHeader
else:
postDataList.append(reader.read(post, blogSettings))
else:
try:
os.remove(os.path.join(blogSettings.blogFolder,"about.html"))
except OSError:
pass
for post in postList:
if post.lower().endswith("about.md"):
pass
else:
postDataList.append(reader.read(post, blogSettings))
# 3.1. Order PostData files by date (newest posts first)
postDataList.sort(key=lambda PostData: PostData.dateParsed, reverse=True)
print "[OK]"
# 4. Generate blog from Post
print "Generating blog...",
# 4.1. Initialise generator and set theme
generator = BlogPostGenerator.BlogPostGenerator(blogSettings)
generator.loadTheme(blogSettings.blogTheme)
# 4.2. Copy images
generator.loadImages()
# 4.2. Generate blog entries
for post in postDataList:
generator.generatePost(post)
# 4.3. Generate index
generator.generateIndex(postDataList, blogSettings)
# 4.4. Generate about page
if blogSettings.displayAboutMe.lower() == "yes":
generator.generateAbout(aboutPost)
print "[OK]"
# 5. Generate tags (if necessary)
print "Generating tags...",
generator.generateTags(postDataList)
print "[OK]"
# 6. Generate RSS (if necessary)
print "Generating RSS feed...",
generator.generateRSS(postDataList)
print "[OK]"
print
print "Blog complete."
quit(0)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
build-llvm.py
|
#!/usr/bin/env python3
# Description: Builds an LLVM toolchain suitable for kernel development
import argparse
import datetime
import glob
import pathlib
import os
import subprocess
import shutil
import textwrap
import time
import utils
import re
import urllib.request as request
from urllib.error import URLError
# This is a known good revision of LLVM for building the kernel
# To bump this, run 'PATH_OVERRIDE=<path_to_updated_toolchain>/bin kernel/build.sh --allyesconfig'
GOOD_REVISION = 'ecdae5df7da03c56d72796c0b1629edd0995548e'
class Directories:
def __init__(self, build_folder, install_folder, linux_folder,
root_folder):
self.build_folder = build_folder
self.install_folder = install_folder
self.linux_folder = linux_folder
self.root_folder = root_folder
class EnvVars:
def __init__(self, cc, cxx, ld):
self.cc = cc
self.cxx = cxx
self.ld = ld
def clang_version(cc, root_folder):
"""
Returns Clang's version as an integer
:param cc: The compiler to check the version of
:param root_folder: Top of the script folder
:return: an int denoting the version of the given compiler
"""
command = [root_folder.joinpath("clang-version.sh").as_posix(), cc]
return int(subprocess.check_output(command).decode())
def parse_parameters(root_folder):
"""
Parses parameters passed to the script into options
:param root_folder: The directory where the script is being invoked from
:return: A 'Namespace' object with all the options parsed from supplied parameters
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
clone_options = parser.add_mutually_exclusive_group()
opt_options = parser.add_mutually_exclusive_group()
parser.add_argument("--assertions",
help=textwrap.dedent("""\
In a release configuration, assertions are not enabled. Assertions can help catch
                        issues when compiling but they will increase compile times by 15-20%%.
"""),
action="store_true")
parser.add_argument("-b",
"--branch",
help=textwrap.dedent("""\
By default, the script builds the main branch (tip of tree) of LLVM. If you would
like to build an older branch, use this parameter. This may be helpful in tracking
down an older bug to properly bisect. This value is just passed along to 'git checkout'
so it can be a branch name, tag name, or hash (unless '--shallow-clone' is used, which
means a hash cannot be used because GitHub does not allow it).
"""),
type=str,
default="main")
parser.add_argument("-B",
"--build-folder",
help=textwrap.dedent("""\
By default, the script will create a "build" folder in the same folder as this script,
then an "llvm" folder within that one and build the files there. If you would like
that done somewhere else, pass it to this parameter. This can either be an absolute
or relative path.
"""),
type=str,
default=os.path.join(root_folder.as_posix(), "build",
"llvm"))
opt_options.add_argument("--build-stage1-only",
help=textwrap.dedent("""\
By default, the script does a multi-stage build: it builds a more lightweight version of
LLVM first (stage 1) then uses that build to build the full toolchain (stage 2). This
is also known as bootstrapping.
This option avoids that, building the first stage as if it were the final stage. Note,
                             this does not install the first stage only toolchain by default to avoid overwriting an
                             installed multi-stage LLVM toolchain; this option is more intended for quick testing
and verification of issues and not regular use. However, if your system is slow or can't
handle 2+ stage builds, you may need this flag. If you would like to install a toolchain
built with this flag, see '--install-stage1-only' below.
"""),
action="store_true")
# yapf: disable
parser.add_argument("--build-type",
metavar='BUILD_TYPE',
help=textwrap.dedent("""\
By default, the script does a Release build; Debug may be useful for tracking down
particularly nasty bugs.
See https://llvm.org/docs/GettingStarted.html#compiling-the-llvm-suite-source-code for
more information.
"""),
type=str,
choices=['Release', 'Debug', 'RelWithDebInfo', 'MinSizeRel'],
default="Release")
# yapf: enable
parser.add_argument("--check-targets",
help=textwrap.dedent("""\
By default, no testing is run on the toolchain. If you would like to run unit/regression
tests, use this parameter to specify a list of check targets to run with ninja. Common
ones include check-llvm, check-clang, and check-lld.
The values passed to this parameter will be automatically concatenated with 'check-'.
                        Example: '--check-targets clang llvm' will make ninja invoke 'check-clang' and 'check-llvm'.
"""),
nargs="+")
parser.add_argument("--clang-vendor",
help=textwrap.dedent("""\
Add this value to the clang version string (like "Apple clang version..." or
"Android clang version..."). Useful when reverting or applying patches on top
of upstream clang to differentiate a toolchain built with this script from
upstream clang or to distinguish a toolchain built with this script from the
system's clang. Defaults to ClangBuiltLinux, can be set to an empty string to
override this and have no vendor in the version string.
"""),
type=str,
default="ClangBuiltLinux")
parser.add_argument("-D",
"--defines",
help=textwrap.dedent("""\
Specify additional cmake values. These will be applied to all cmake invocations.
Example: -D LLVM_PARALLEL_COMPILE_JOBS=2 LLVM_PARALLEL_LINK_JOBS=2
See https://llvm.org/docs/CMake.html for various cmake values. Note that some of
the options to this script correspond to cmake values.
"""),
nargs="+")
parser.add_argument("-f",
"--full-toolchain",
help=textwrap.dedent("""\
By default, the script tunes LLVM for building the Linux kernel by disabling several
projects, targets, and configuration options, which speeds up build times but limits
how the toolchain could be used.
With this option, all projects and targets are enabled and the script tries to avoid
unnecessarily turning off configuration options. The '--projects' and '--targets' options
to the script can still be used to change the list of projects and targets. This is
useful when using the script to do upstream LLVM development or trying to use LLVM as a
system-wide toolchain.
"""),
action="store_true")
parser.add_argument("-i",
"--incremental",
help=textwrap.dedent("""\
By default, the script removes all build artifacts from previous compiles. This
prevents that, allowing for dirty builds and faster compiles.
"""),
action="store_true")
parser.add_argument("-I",
"--install-folder",
help=textwrap.dedent("""\
By default, the script will create an "install" folder in the same folder as this script
and install the LLVM toolchain there. If you'd like to have it installed somewhere
else, pass it to this parameter. This can either be an absolute or relative path.
"""),
type=str,
default=os.path.join(root_folder.as_posix(),
"install"))
parser.add_argument("--install-stage1-only",
help=textwrap.dedent("""\
When doing a stage 1 only build with '--build-stage1-only', install the toolchain to
the value of INSTALL_FOLDER.
"""),
action="store_true")
parser.add_argument("-L",
"--linux-folder",
help=textwrap.dedent("""\
If building with PGO, use this kernel source for building profiles instead of downloading
a tarball from kernel.org. This should be the full or relative path to a complete kernel
source directory, not a tarball or zip file.
"""),
type=str)
parser.add_argument("--lto",
metavar="LTO_TYPE",
help=textwrap.dedent("""\
Build the final compiler with either ThinLTO (thin) or full LTO (full), which can
often improve compile time performance by 3-5%% on average.
Only use full LTO if you have more than 64 GB of memory. ThinLTO uses way less memory,
compiles faster because it is fully multithreaded, and it has almost identical
performance (within 1%% usually) to full LTO. The compile time impact of ThinLTO is about
5x the speed of a '--build-stage1-only' build and 3.5x the speed of a default build. LTO
is much worse and is not worth considering unless you have a server available to build on.
This option should not be used with '--build-stage1-only' unless you know that your
host compiler and linker support it. See the two links below for more information.
https://llvm.org/docs/LinkTimeOptimization.html
https://clang.llvm.org/docs/ThinLTO.html
"""),
type=str,
choices=['thin', 'full'])
parser.add_argument("-n",
"--no-update",
help=textwrap.dedent("""\
By default, the script always updates the LLVM repo before building. This prevents
that, which can be helpful during something like bisecting or manually managing the
repo to pin it to a particular revision.
"""),
action="store_true")
parser.add_argument("--no-ccache",
help=textwrap.dedent("""\
By default, the script adds LLVM_CCACHE_BUILD to the cmake options so that ccache is
used for the stage one build. This helps speed up compiles but it is only useful for
stage one, which is built using the host compiler, which usually does not change,
                        resulting in more cache hits. Subsequent stages will always be completely clean builds
since ccache will have no hits due to using a new compiler and it will unnecessarily
fill up the cache with files that will never be called again due to changing compilers
on the next build. This option prevents ccache from being used even at stage one, which
could be useful for benchmarking clean builds.
"""),
action="store_true")
parser.add_argument("-p",
"--projects",
help=textwrap.dedent("""\
Currently, the script only enables the clang, compiler-rt, lld, and polly folders in LLVM.
If you would like to override this, you can use this parameter and supply a list that is
supported by LLVM_ENABLE_PROJECTS.
See step #5 here: https://llvm.org/docs/GettingStarted.html#getting-started-quickly-a-summary
Example: -p \"clang;lld;libcxx\"
"""),
type=str)
opt_options.add_argument("--pgo",
metavar="PGO_BENCHMARK",
help=textwrap.dedent("""\
Build the final compiler with Profile Guided Optimization, which can often improve compile
time performance by 15-20%% on average. The script will:
1. Build a small bootstrap compiler like usual (stage 1).
2. Build an instrumented compiler with that compiler (stage 2).
3. Run the specified benchmark(s).
kernel-defconfig, kernel-allmodconfig, kernel-allyesconfig:
Download and extract kernel source from kernel.org (unless '--linux-folder' is
specified), build the necessary binutils if not found in PATH, and build some
kernels based on the requested config with the instrumented compiler (based on the
'--targets' option). If there is a build error with one of the kernels, build-llvm.py
will fail as well.
llvm:
The script will run the LLVM tests if they were requested via '--check-targets' then
build a full LLVM toolchain with the instrumented compiler.
4. Build a final compiler with the profile data generated from step 3 (stage 3).
Due to the nature of this process, '--build-stage1-only' cannot be used. There will be
three distinct LLVM build folders/compilers and several kernel builds done by default so
ensure that you have enough space on your disk to hold this (25GB should be enough) and the
time/patience to build three toolchains and kernels (will often take 5x the amount of time
as '--build-stage1-only' and 4x the amount of time as the default two-stage build that the
                             script does). When combined with '--lto', the compile time impact is about 9-10x that of a
                             one or two stage build.
See https://llvm.org/docs/HowToBuildWithPGO.html for more information.
"""),
nargs="+",
choices=[
'kernel-defconfig', 'kernel-allmodconfig',
'kernel-allyesconfig', 'llvm'
])
clone_options.add_argument("-s",
"--shallow-clone",
help=textwrap.dedent("""\
Only fetch the required objects and omit history when cloning the LLVM repo. This
option is only used for the initial clone, not subsequent fetches. This can break
the script's ability to automatically update the repo to newer revisions or branches
so be careful using this. This option is really designed for continuous integration
                               runs, where a one-off clone is necessary. A better option is usually managing the repo
yourself:
https://github.com/ClangBuiltLinux/tc-build#build-llvmpy
A couple of notes:
1. This cannot be used with '--use-good-revision'.
2. When no '--branch' is specified, only main is fetched. To work with other branches,
a branch other than main needs to be specified when the repo is first cloned.
"""),
action="store_true")
parser.add_argument("--show-build-commands",
help=textwrap.dedent("""\
                        By default, the script only shows the output of the commands it is running. When this option
is enabled, the invocations of cmake, ninja, and kernel/build.sh will be shown to help with
reproducing issues outside of the script.
"""),
action="store_true")
parser.add_argument("-t",
"--targets",
help=textwrap.dedent("""\
LLVM is multitargeted by default. Currently, this script only enables the arm32, aarch64,
bpf, mips, powerpc, riscv, s390, and x86 backends because that's what the Linux kernel is
currently concerned with. If you would like to override this, you can use this parameter
and supply a list that is supported by LLVM_TARGETS_TO_BUILD:
https://llvm.org/docs/CMake.html#llvm-specific-variables
Example: -t "AArch64;X86"
"""),
type=str)
clone_options.add_argument("--use-good-revision",
help=textwrap.dedent("""\
By default, the script updates LLVM to the latest tip of tree revision, which may at times be
broken or not work right. With this option, it will checkout a known good revision of LLVM
that builds and works properly. If you use this option often, please remember to update the
script as the known good revision will change.
NOTE: This option cannot be used with '--shallow-clone'.
"""),
action="store_true")
return parser.parse_args()
def linker_test(cc, ld):
"""
    Test to see if the supplied ld value will work with cc -fuse-ld
    :param cc: A working C compiler to compile the test program
    :param ld: A linker to test -fuse-ld against
    :return: 0 if the linker works with -fuse-ld, non-zero otherwise
"""
echo = subprocess.Popen(['echo', 'int main() { return 0; }'],
stdout=subprocess.PIPE)
return subprocess.run(
[cc, '-fuse-ld=' + ld, '-o', '/dev/null', '-x', 'c', '-'],
stdin=echo.stdout,
stderr=subprocess.DEVNULL).returncode
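# Illustrative usage of linker_test (a sketch, not part of the original
# script; assumes clang and ld.lld are installed on the host):
#   if linker_test("clang", "lld") == 0:
#       print("clang can link with -fuse-ld=lld")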
def versioned_binaries(binary_name):
"""
Returns a list of versioned binaries that may be used on Debian/Ubuntu
:param binary_name: The name of the binary that we're checking for
:return: List of versioned binaries
"""
# There might be clang-7 to clang-11
tot_llvm_ver = 11
try:
response = request.urlopen(
'https://raw.githubusercontent.com/llvm/llvm-project/main/llvm/CMakeLists.txt'
)
to_parse = None
data = response.readlines()
for line in data:
line = line.decode('utf-8').strip()
if "set(LLVM_VERSION_MAJOR" in line:
to_parse = line
break
        tot_llvm_ver = re.search(r'\d+', to_parse).group(0)
except URLError:
pass
return [
'%s-%s' % (binary_name, i) for i in range(int(tot_llvm_ver), 6, -1)
]
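# For example, if the tip-of-tree major version parses as 11, then
# versioned_binaries("clang") yields (illustrative):
#   ['clang-11', 'clang-10', 'clang-9', 'clang-8', 'clang-7']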
def check_cc_ld_variables(root_folder):
"""
Sets the cc, cxx, and ld variables, which will be passed to cmake
:return: A tuple of valid cc, cxx, ld values that can be used to compile LLVM
"""
utils.print_header("Checking CC and LD")
cc, linker, ld = None, None, None
# If the user specified a C compiler, get its full path
if 'CC' in os.environ:
cc = shutil.which(os.environ['CC'])
# Otherwise, try to find one
else:
possible_compilers = versioned_binaries("clang") + ['clang', 'gcc']
for compiler in possible_compilers:
cc = shutil.which(compiler)
if cc is not None:
break
if cc is None:
raise RuntimeError(
"Neither gcc nor clang could be found on your system!")
# Evaluate if CC is a symlink. Certain packages of clang (like from
# apt.llvm.org) symlink the clang++ binary to clang++-<version> in
    # /usr/bin, which then points to something like /usr/lib/llvm-<version>/bin.
# This won't be found by the dumb logic below and trying to parse and figure
# out a heuristic for that is a lot more effort than just going into the
# folder that clang is actually installed in and getting clang++ from there.
cc = os.path.realpath(cc)
cc_folder = os.path.dirname(cc)
# If the user specified a C++ compiler, get its full path
if 'CXX' in os.environ:
cxx = shutil.which(os.environ['CXX'])
# Otherwise, use the one where CC is
else:
if "clang" in cc:
cxx = "clang++"
else:
cxx = "g++"
cxx = shutil.which(cxx, path=cc_folder + ":" + os.environ['PATH'])
cxx = cxx.strip()
# If the user specified a linker
if 'LD' in os.environ:
# evaluate its full path with clang to avoid weird issues and check to
# see if it will work with '-fuse-ld', which is what cmake will do. Doing
# it now prevents a hard error later.
ld = os.environ['LD']
if "clang" in cc and clang_version(cc, root_folder) >= 30900:
ld = shutil.which(ld)
if linker_test(cc, ld):
print("LD won't work with " + cc +
", saving you from yourself by ignoring LD value")
ld = None
# If the user didn't specify a linker
else:
# and we're using clang, try to find the fastest one
if "clang" in cc:
possible_linkers = ['lld', 'gold', 'bfd']
for linker in possible_linkers:
# We want to find lld wherever the clang we are using is located
ld = shutil.which("ld." + linker,
path=cc_folder + ":" + os.environ['PATH'])
if ld is not None:
break
# If clang is older than 3.9, it won't accept absolute paths so we
# need to just pass it the name (and modify PATH so that it is found properly)
# https://github.com/llvm/llvm-project/commit/e43b7413597d8102a4412f9de41102e55f4f2ec9
if clang_version(cc, root_folder) < 30900:
os.environ['PATH'] = cc_folder + ":" + os.environ['PATH']
ld = linker
# and we're using gcc, try to use gold
else:
ld = "gold"
if linker_test(cc, ld):
ld = None
# Print what binaries we are using to compile/link with so the user can
# decide if that is proper or not
print("CC: " + cc)
print("CXX: " + cxx)
if ld is not None:
ld = ld.strip()
ld_to_print = shutil.which("ld." + ld)
if ld_to_print is None:
ld_to_print = shutil.which(ld)
print("LD: " + ld_to_print)
return cc, cxx, ld
def check_dependencies():
"""
Makes sure that the base dependencies of cmake, curl, git, and ninja are installed
"""
utils.print_header("Checking dependencies")
required_commands = ["cmake", "curl", "git", "ninja"]
for command in required_commands:
output = shutil.which(command)
if output is None:
raise RuntimeError(command +
" could not be found, please install it!")
print(output)
def repo_is_shallow(repo):
"""
Check if repo is a shallow clone already (looks for <repo>/.git/shallow)
:param repo: The path to the repo to check
:return: True if the repo is shallow, False if not
"""
git_dir = subprocess.check_output(["git", "rev-parse", "--git-dir"],
cwd=repo.as_posix()).decode().strip()
return pathlib.Path(repo).resolve().joinpath(git_dir, "shallow").exists()
def ref_exists(repo, ref):
"""
Check if ref exists using show-branch (works for branches, tags, and raw SHAs)
:param repo: The path to the repo to check
:param ref: The ref to check
    :return: True if ref exists, False if not
"""
return subprocess.run(["git", "show-branch", ref],
stderr=subprocess.STDOUT,
stdout=subprocess.DEVNULL,
cwd=repo.as_posix()).returncode == 0
def fetch_llvm_binutils(root_folder, update, shallow, ref):
"""
Download llvm and binutils or update them if they exist
:param root_folder: Working directory
    :param update: Boolean indicating whether sources need to be updated or not
    :param shallow: Boolean indicating whether the initial clone should be shallow
    :param ref: The ref to checkout the monorepo to
"""
p = root_folder.joinpath("llvm-project")
cwd = p.as_posix()
if p.is_dir():
if update:
utils.print_header("Updating LLVM")
# Make sure repo is up to date before trying to see if checkout is possible
subprocess.run(["git", "fetch", "origin"], check=True, cwd=cwd)
# Explain to the user how to avoid issues if their ref does not exist with
# a shallow clone.
if repo_is_shallow(p) and not ref_exists(p, ref):
utils.print_error(
"\nSupplied ref (%s) does not exist, cannot checkout." %
ref)
utils.print_error("To proceed, either:")
utils.print_error(
"\t1. Manage the repo yourself and pass '--no-update' to the script."
)
utils.print_error(
"\t2. Run 'git -C %s fetch --unshallow origin' to get a complete repository."
% cwd)
utils.print_error(
"\t3. Delete '%s' and re-run the script with '-s' + '-b <ref>' to get a full set of refs."
% cwd)
exit(1)
# Do the update
subprocess.run(["git", "checkout", ref], check=True, cwd=cwd)
local_ref = None
try:
local_ref = subprocess.check_output(
["git", "symbolic-ref", "-q", "HEAD"],
cwd=cwd).decode("utf-8")
except subprocess.CalledProcessError:
# This is thrown when we're on a revision that cannot be mapped to a symbolic reference, like a tag
# or a git hash. Swallow and move on with the rest of our business.
pass
if local_ref and local_ref.startswith("refs/heads/"):
# This is a branch, pull from remote
subprocess.run([
"git", "pull", "--rebase", "origin",
local_ref.strip().replace("refs/heads/", "")
],
check=True,
cwd=cwd)
else:
utils.print_header("Downloading LLVM")
extra_args = ()
if shallow:
extra_args = ("--depth", "1")
if ref != "main":
extra_args += ("--no-single-branch", )
subprocess.run([
"git", "clone", *extra_args,
"https://github.com/llvm/llvm-project",
p.as_posix()
],
check=True)
subprocess.run(["git", "checkout", ref], check=True, cwd=cwd)
# One might wonder why we are downloading binutils in an LLVM build script :)
# We need it for the LLVMgold plugin, which can be used for LTO with ld.gold,
# which at the time of writing this, is how the Google Pixel 3 kernel is built
# and linked.
utils.download_binutils(root_folder)
def cleanup(build_folder, incremental):
"""
Clean up and create the build folder
:param build_folder: The build directory
:param incremental: Whether the build is incremental or not.
:return:
"""
if not incremental and build_folder.is_dir():
shutil.rmtree(build_folder.as_posix())
build_folder.mkdir(parents=True, exist_ok=True)
def get_final_stage(args):
"""
Gets the final stage number, which depends on PGO or a stage one only build
:param args: The args variable generated by parse_parameters
:return: The final stage number
"""
if args.build_stage1_only:
return 1
elif args.pgo:
return 3
else:
return 2
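# Put differently (illustrative): '--build-stage1-only' -> 1, '--pgo' -> 3,
# and the default two-stage build -> 2.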
def should_install_toolchain(args, stage):
"""
Returns true if the just built toolchain should be installed
:param args: The args variable generated by parse_parameters
:param stage: What stage we are at
:return: True when the toolchain should be installed; see function comments for more details
"""
# We shouldn't install the toolchain if we are not on the final stage
if stage != get_final_stage(args):
return False
    # We shouldn't install the toolchain if the user is only doing a stage 1 build
# and they didn't explicitly request an install
if args.build_stage1_only and not args.install_stage1_only:
return False
# Otherwise, we should install the toolchain to the install folder
return True
def bootstrap_stage(args, stage):
"""
Returns true if we are doing a multistage build and on stage 1
:param args: The args variable generated by parse_parameters
:param stage: What stage we are at
:return: True if doing a multistage build and on stage 1, false if not
"""
return not args.build_stage1_only and stage == 1
def instrumented_stage(args, stage):
"""
Returns true if we are using PGO and on stage 2
:param args: The args variable generated by parse_parameters
:param stage: What stage we are at
:return: True if using PGO and on stage 2, false if not
"""
return args.pgo and stage == 2
def pgo_stage(stage):
"""
Returns true if LLVM is being built as a PGO benchmark
:return: True if LLVM is being built as a PGO benchmark, false if not
"""
return stage == "pgo"
def slim_cmake_defines():
"""
Generate a set of cmake defines to slim down the LLVM toolchain
:return: A set of defines
"""
# yapf: disable
defines = {
# Objective-C Automatic Reference Counting (we don't use Objective-C)
# https://clang.llvm.org/docs/AutomaticReferenceCounting.html
'CLANG_ENABLE_ARCMT': 'OFF',
# We don't (currently) use the static analyzer and it saves cycles
# according to Chromium OS:
# https://crrev.com/44702077cc9b5185fc21e99485ee4f0507722f82
'CLANG_ENABLE_STATIC_ANALYZER': 'OFF',
# We don't use the plugin system and it will remove unused symbols:
# https://crbug.com/917404
'CLANG_PLUGIN_SUPPORT': 'OFF',
# Don't build bindings; they are for other languages that the kernel does not use
'LLVM_ENABLE_BINDINGS': 'OFF',
        # Don't build OCaml documentation
'LLVM_ENABLE_OCAMLDOC': 'OFF',
# Don't build clang-tools-extras to cut down on build targets (about 400 files or so)
'LLVM_EXTERNAL_CLANG_TOOLS_EXTRA_SOURCE_DIR': '',
# Don't include documentation build targets because it is available on the web
'LLVM_INCLUDE_DOCS': 'OFF',
# Don't include example build targets to save on cmake cycles
'LLVM_INCLUDE_EXAMPLES': 'OFF'
}
# yapf: enable
return defines
def get_stage_binary(binary, dirs, stage):
"""
Generate a path from the stage bin directory for the requested binary
:param binary: Name of the binary
:param dirs: An instance of the Directories class with the paths to use
:param stage: The staged binary to use
:return: A path suitable for a cmake define
"""
return dirs.build_folder.joinpath("stage%d" % stage, "bin",
binary).as_posix()
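# For example, get_stage_binary("clang", dirs, 1) resolves to
# <build_folder>/stage1/bin/clang (illustrative path).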
def if_binary_exists(binary_name, cc):
"""
Returns the path of the requested binary if it exists and clang is being used, None if not
:param binary_name: Name of the binary
:param cc: Path to CC binary
:return: A path to binary if it exists and clang is being used, None if either condition is false
"""
binary = None
if "clang" in cc:
binary = shutil.which(binary_name,
path=os.path.dirname(cc) + ":" +
os.environ['PATH'])
return binary
def cc_ld_cmake_defines(dirs, env_vars, stage):
"""
Generate compiler and linker cmake defines, which change depending on what
stage we are at
:param dirs: An instance of the Directories class with the paths to use
:param env_vars: An instance of the EnvVars class with the compilers/linker to use
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
if stage == 1:
# Already figured out above
cc = env_vars.cc
cxx = env_vars.cxx
ld = env_vars.ld
# Optional to have
ar = if_binary_exists("llvm-ar", cc)
ranlib = if_binary_exists("llvm-ranlib", cc)
# Cannot be used from host due to potential incompatibilities
clang_tblgen = None
llvm_tblgen = None
else:
if pgo_stage(stage):
stage = 2
else:
stage = 1
ar = get_stage_binary("llvm-ar", dirs, stage)
cc = get_stage_binary("clang", dirs, stage)
clang_tblgen = get_stage_binary("clang-tblgen", dirs, stage)
cxx = get_stage_binary("clang++", dirs, stage)
ld = get_stage_binary("ld.lld", dirs, stage)
llvm_tblgen = get_stage_binary("llvm-tblgen", dirs, stage)
ranlib = get_stage_binary("llvm-ranlib", dirs, stage)
# Use llvm-ar for stage 2+ builds to avoid errors with bfd plugin
# bfd plugin: LLVM gold plugin has failed to create LTO module: Unknown attribute kind (60) (Producer: 'LLVM9.0.0svn' Reader: 'LLVM 8.0.0')
if ar:
defines['CMAKE_AR'] = ar
# The C compiler to use
defines['CMAKE_C_COMPILER'] = cc
if clang_tblgen:
defines['CLANG_TABLEGEN'] = clang_tblgen
# The C++ compiler to use
defines['CMAKE_CXX_COMPILER'] = cxx
# If we have a linker, use it
if ld:
defines['LLVM_USE_LINKER'] = ld
if llvm_tblgen:
defines['LLVM_TABLEGEN'] = llvm_tblgen
# Use llvm-ranlib for stage 2+ builds
if ranlib:
defines['CMAKE_RANLIB'] = ranlib
return defines
def distro_cmake_defines():
"""
Generate distribution specific cmake defines
:return: A set of defines
"""
defines = {}
# Clear Linux needs a different target to find all of the C++ header files, otherwise
# stage 2+ compiles will fail without this
# We figure this out based on the existence of x86_64-generic-linux in the C++ headers path
if glob.glob("/usr/include/c++/*/x86_64-generic-linux"):
defines['LLVM_HOST_TRIPLE'] = "x86_64-generic-linux"
return defines
def project_cmake_defines(args, stage):
"""
Generate lists of projects, depending on whether a full or
kernel-focused LLVM build is being done and the stage
:param args: The args variable generated by parse_parameters
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
if args.full_toolchain:
if args.projects:
projects = args.projects
else:
projects = "all"
else:
if bootstrap_stage(args, stage):
projects = "clang;lld"
if args.pgo:
projects += ';compiler-rt'
else:
if instrumented_stage(args, stage):
projects = "clang;lld"
else:
projects = "clang;compiler-rt;lld;polly"
defines['LLVM_ENABLE_PROJECTS'] = projects
if "compiler-rt" in projects:
if not args.full_toolchain:
# Don't build libfuzzer when compiler-rt is enabled, it invokes cmake again and we don't use it
defines['COMPILER_RT_BUILD_LIBFUZZER'] = 'OFF'
# We only use compiler-rt for the sanitizers, disable some extra stuff we don't need
# Chromium OS also does this: https://crrev.com/c/1629950
defines['COMPILER_RT_BUILD_CRT'] = 'OFF'
defines['COMPILER_RT_BUILD_XRAY'] = 'OFF'
# We don't need the sanitizers for the stage 1 bootstrap
if bootstrap_stage(args, stage):
defines['COMPILER_RT_BUILD_SANITIZERS'] = 'OFF'
return defines
def get_targets(args):
"""
Gets the list of targets for cmake and kernel/build.sh
:param args: The args variable generated by parse_parameters
:return: A string of targets suitable for cmake or kernel/build.sh
"""
if args.targets:
targets = args.targets
elif args.full_toolchain:
targets = "all"
else:
targets = "AArch64;ARM;BPF;Mips;PowerPC;RISCV;SystemZ;X86"
return targets
def target_cmake_defines(args, stage):
"""
Generate target cmake define, which change depending on what
stage we are at
:param args: The args variable generated by parse_parameters
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
if bootstrap_stage(args, stage):
targets = "host"
else:
targets = get_targets(args)
defines['LLVM_TARGETS_TO_BUILD'] = targets
return defines
def stage_specific_cmake_defines(args, dirs, stage):
"""
Generate other stage specific defines
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
# Use ccache for the stage 1 build as it will usually be done with a consistent
# compiler and won't need a full rebuild very often
if stage == 1 and not args.no_ccache and shutil.which("ccache"):
defines['LLVM_CCACHE_BUILD'] = 'ON'
if bootstrap_stage(args, stage):
# Based on clang/cmake/caches/Apple-stage1.cmake
defines.update(slim_cmake_defines())
defines['CMAKE_BUILD_TYPE'] = 'Release'
defines['LLVM_BUILD_UTILS'] = 'OFF'
defines['LLVM_ENABLE_BACKTRACES'] = 'OFF'
defines['LLVM_ENABLE_WARNINGS'] = 'OFF'
defines['LLVM_INCLUDE_TESTS'] = 'OFF'
else:
# https://llvm.org/docs/CMake.html#frequently-used-cmake-variables
defines['CMAKE_BUILD_TYPE'] = args.build_type
# We don't care about warnings if we are building a release build
if args.build_type == "Release":
defines['LLVM_ENABLE_WARNINGS'] = 'OFF'
# Build with assertions enabled if requested (will slow down compilation
# so it is not on by default)
if args.assertions:
defines['LLVM_ENABLE_ASSERTIONS'] = 'ON'
# Where the toolchain should be installed
defines['CMAKE_INSTALL_PREFIX'] = dirs.install_folder.as_posix()
# Build with instrumentation if we are using PGO and on stage 2
if instrumented_stage(args, stage):
defines['LLVM_BUILD_INSTRUMENTED'] = 'IR'
defines['LLVM_BUILD_RUNTIME'] = 'OFF'
# If we are at the final stage, use PGO/Thin LTO if requested
if stage == get_final_stage(args):
if args.pgo:
defines['LLVM_PROFDATA_FILE'] = dirs.build_folder.joinpath(
"profdata.prof").as_posix()
if args.lto:
defines['LLVM_ENABLE_LTO'] = args.lto.capitalize()
    # If the user did not specify CMAKE_C_FLAGS or CMAKE_CXX_FLAGS, add them as
    # empty values to pass to stage 2 so that there are no environment issues
    # (since CFLAGS and CXXFLAGS are taken into account by cmake)
keys = ['CMAKE_C_FLAGS', 'CMAKE_CXX_FLAGS']
for key in keys:
        if key not in str(args.defines):
defines[key] = ''
# For LLVMgold.so, which is used for LTO with ld.gold
defines['LLVM_BINUTILS_INCDIR'] = dirs.root_folder.joinpath(
utils.current_binutils(), "include").as_posix()
defines['LLVM_ENABLE_PLUGINS'] = 'ON'
return defines
def build_cmake_defines(args, dirs, env_vars, stage):
"""
Generate cmake defines
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param env_vars: An instance of the EnvVars class with the compilers/linker to use
:param stage: What stage we are at
:return: A set of defines
"""
defines = {}
# Get slim defines if we are not building a full toolchain
if not args.full_toolchain:
defines.update(slim_cmake_defines())
# Add compiler/linker defines, which change based on stage
defines.update(cc_ld_cmake_defines(dirs, env_vars, stage))
# Add distribution specific defines
defines.update(distro_cmake_defines())
# Add project and target defines, which change based on stage
defines.update(project_cmake_defines(args, stage))
defines.update(target_cmake_defines(args, stage))
# Add other stage specific defines
defines.update(stage_specific_cmake_defines(args, dirs, stage))
# Add the vendor string if necessary
if args.clang_vendor:
defines['CLANG_VENDOR'] = args.clang_vendor
# Removes system dependency on terminfo to keep the dynamic library dependencies slim
defines['LLVM_ENABLE_TERMINFO'] = 'OFF'
return defines
def show_command(args, command):
"""
:param args: The args variable generated by parse_parameters
:param command: The command being run
"""
if args.show_build_commands:
print("$ %s" % " ".join([str(element) for element in command]))
def get_pgo_header_folder(stage):
if pgo_stage(stage):
header_string = "for PGO"
sub_folder = "pgo"
else:
header_string = "stage %d" % stage
sub_folder = "stage%d" % stage
return (header_string, sub_folder)
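# For example (illustrative): get_pgo_header_folder("pgo") returns
# ("for PGO", "pgo"), while get_pgo_header_folder(2) returns ("stage 2", "stage2").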
def invoke_cmake(args, dirs, env_vars, stage):
"""
Invoke cmake to generate the build files
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param env_vars: An instance of the EnvVars class with the compilers/linker to use
:param stage: What stage we are at
:return:
"""
# Add the defines, point them to our build folder, and invoke cmake
cmake = ['cmake', '-G', 'Ninja', '-Wno-dev']
defines = build_cmake_defines(args, dirs, env_vars, stage)
for key in defines:
newdef = '-D' + key + '=' + defines[key]
cmake += [newdef]
if args.defines:
for d in args.defines:
cmake += ['-D' + d]
cmake += [dirs.root_folder.joinpath("llvm-project", "llvm").as_posix()]
header_string, sub_folder = get_pgo_header_folder(stage)
cwd = dirs.build_folder.joinpath(sub_folder).as_posix()
utils.print_header("Configuring LLVM %s" % header_string)
show_command(args, cmake)
subprocess.run(cmake, check=True, cwd=cwd)
def print_install_info(install_folder):
"""
Prints out where the LLVM toolchain is installed, how to add to PATH, and version information
:param install_folder: Where the LLVM toolchain is installed
:return:
"""
bin_folder = install_folder.joinpath("bin")
print("\nLLVM toolchain installed to: %s" % install_folder.as_posix())
print("\nTo use, either run:\n")
print(" $ export PATH=%s:${PATH}\n" % bin_folder.as_posix())
print("or add:\n")
print(" PATH=%s:${PATH}\n" % bin_folder.as_posix())
print("to the command you want to use this toolchain.\n")
clang = bin_folder.joinpath("clang")
lld = bin_folder.joinpath("ld.lld")
if clang.exists() or lld.exists():
print("Version information:\n")
for binary in [clang, lld]:
if binary.exists():
subprocess.run([binary, "--version"], check=True)
print()
def ninja_check(args, build_folder):
"""
Invoke ninja with check targets if they are present
:param args: The args variable generated by parse_parameters
:param build_folder: The build folder that ninja should be run in
:return:
"""
if args.check_targets:
ninja_check = ['ninja'] + ['check-%s' % s for s in args.check_targets]
show_command(args, ninja_check)
subprocess.run(ninja_check, check=True, cwd=build_folder)
def invoke_ninja(args, dirs, stage):
"""
Invoke ninja to run the actual build
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:param stage: The current stage we're building
:return:
"""
header_string, sub_folder = get_pgo_header_folder(stage)
utils.print_header("Building LLVM %s" % header_string)
build_folder = dirs.build_folder.joinpath(sub_folder)
install_folder = None
if should_install_toolchain(args, stage):
install_folder = dirs.install_folder
elif stage == 1 and args.build_stage1_only and not args.install_stage1_only:
install_folder = build_folder
build_folder = build_folder.as_posix()
time_started = time.time()
show_command(args, ["ninja"])
subprocess.run('ninja', check=True, cwd=build_folder)
if stage == get_final_stage(args):
ninja_check(args, build_folder)
print()
print("LLVM build duration: " +
str(datetime.timedelta(seconds=int(time.time() - time_started))))
if should_install_toolchain(args, stage):
subprocess.run(['ninja', 'install'],
check=True,
cwd=build_folder,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
utils.create_gitignore(install_folder)
if install_folder is not None:
print_install_info(install_folder)
def kernel_build_sh(args, config, dirs):
"""
Run kernel/build.sh to generate PGO profiles
:param args: The args variable generated by parse_parameters
:param config: The config to build (defconfig, allmodconfig, allyesconfig)
:param dirs: An instance of the Directories class with the paths to use
:return:
"""
# Run kernel/build.sh
build_sh = [
dirs.root_folder.joinpath("kernel", "build.sh"), '-b',
dirs.build_folder, '--pgo', '-t',
get_targets(args)
]
if config != "defconfig":
build_sh += ['--%s' % config]
if dirs.linux_folder:
build_sh += ['-s', dirs.linux_folder.as_posix()]
show_command(args, build_sh)
subprocess.run(build_sh, check=True, cwd=dirs.build_folder.as_posix())
def pgo_llvm_build(args, dirs):
"""
Builds LLVM as a PGO benchmark
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:return:
"""
# Run check targets if the user requested them for PGO coverage
ninja_check(args, dirs.build_folder.joinpath("stage2").as_posix())
# Then, build LLVM as if it were the full final toolchain
stage = "pgo"
dirs.build_folder.joinpath(stage).mkdir(parents=True, exist_ok=True)
invoke_cmake(args, dirs, None, stage)
invoke_ninja(args, dirs, stage)
def generate_pgo_profiles(args, dirs):
"""
Build a set of kernels across a few architectures to generate PGO profiles
:param args: The args variable generated by parse_parameters
:param dirs: An instance of the Directories class with the paths to use
:return:
"""
utils.print_header("Building PGO profiles")
# Run PGO benchmarks
for pgo in args.pgo:
if pgo.split("-")[0] == "kernel":
kernel_build_sh(args, pgo.split("-")[1], dirs)
if pgo == "llvm":
pgo_llvm_build(args, dirs)
# Combine profiles
subprocess.run([
dirs.build_folder.joinpath("stage1", "bin", "llvm-profdata"), "merge",
"-output=%s" % dirs.build_folder.joinpath("profdata.prof").as_posix()
] + glob.glob(
dirs.build_folder.joinpath("stage2", "profiles",
"*.profraw").as_posix()),
check=True)
def do_multistage_build(args, dirs, env_vars):
stages = [1]
if not args.build_stage1_only:
stages += [2]
if args.pgo:
stages += [3]
for stage in stages:
dirs.build_folder.joinpath("stage%d" % stage).mkdir(parents=True,
exist_ok=True)
invoke_cmake(args, dirs, env_vars, stage)
invoke_ninja(args, dirs, stage)
# Build profiles after stage 2 when using PGO
if instrumented_stage(args, stage):
generate_pgo_profiles(args, dirs)
def main():
root_folder = pathlib.Path(__file__).resolve().parent
args = parse_parameters(root_folder)
build_folder = pathlib.Path(args.build_folder)
if not build_folder.is_absolute():
build_folder = root_folder.joinpath(build_folder)
install_folder = pathlib.Path(args.install_folder)
if not install_folder.is_absolute():
install_folder = root_folder.joinpath(install_folder)
linux_folder = None
if args.linux_folder:
linux_folder = pathlib.Path(args.linux_folder)
if not linux_folder.is_absolute():
linux_folder = root_folder.joinpath(linux_folder)
if not linux_folder.exists():
utils.print_error("\nSupplied kernel source (%s) does not exist!" %
linux_folder.as_posix())
exit(1)
env_vars = EnvVars(*check_cc_ld_variables(root_folder))
check_dependencies()
if args.use_good_revision:
ref = GOOD_REVISION
else:
ref = args.branch
fetch_llvm_binutils(root_folder, not args.no_update, args.shallow_clone,
ref)
cleanup(build_folder, args.incremental)
dirs = Directories(build_folder, install_folder, linux_folder, root_folder)
do_multistage_build(args, dirs, env_vars)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CXX",
"CC",
"PATH",
"LD"
] |
[]
|
["CXX", "CC", "PATH", "LD"]
|
python
| 4 | 0 | |
node/modules/genesis.go
|
package modules
import (
"bytes"
"os"
"go.uber.org/fx"
"github.com/ipfs/go-datastore"
"github.com/ipld/go-car"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
)
func ErrorGenesis() Genesis {
return func() (header *types.BlockHeader, e error) {
return nil, xerrors.New("No genesis block provided, provide the file with 'lotus daemon --genesis=[genesis file]'")
}
}
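// LoadGenesis returns a Genesis constructor that decodes the genesis block
// header from the given CAR bytes, expecting the CAR file to carry exactly
// one root CID.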
func LoadGenesis(genBytes []byte) func(fx.Lifecycle, helpers.MetricsCtx, dtypes.ChainBlockstore) Genesis {
return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, bs dtypes.ChainBlockstore) Genesis {
return func() (header *types.BlockHeader, e error) {
ctx := helpers.LifecycleCtx(mctx, lc)
c, err := car.LoadCar(ctx, bs, bytes.NewReader(genBytes))
if err != nil {
return nil, xerrors.Errorf("loading genesis car file failed: %w", err)
}
if len(c.Roots) != 1 {
return nil, xerrors.New("expected genesis file to have one root")
}
root, err := bs.Get(ctx, c.Roots[0])
if err != nil {
return nil, err
}
h, err := types.DecodeBlock(root.RawData())
if err != nil {
return nil, xerrors.Errorf("decoding block failed: %w", err)
}
return h, nil
}
}
}
func DoSetGenesis(_ dtypes.AfterGenesisSet) {}
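// SetGenesis persists the genesis block on first start. If the repo already
// has a genesis block, it is compared against the expected one unless the
// LOTUS_SKIP_GENESIS_CHECK environment variable is set to "_yes_".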
func SetGenesis(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) {
ctx := helpers.LifecycleCtx(mctx, lc)
genFromRepo, err := cs.GetGenesis(ctx)
if err == nil {
if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" {
expectedGenesis, err := g()
if err != nil {
return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting expected genesis failed: %w", err)
}
if genFromRepo.Cid() != expectedGenesis.Cid() {
return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis in the repo is not the one expected by this version of Lotus!")
}
}
return dtypes.AfterGenesisSet{}, nil // already set, noop
}
if err != datastore.ErrNotFound {
return dtypes.AfterGenesisSet{}, xerrors.Errorf("getting genesis block failed: %w", err)
}
genesis, err := g()
if err != nil {
return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err)
}
return dtypes.AfterGenesisSet{}, cs.SetGenesis(ctx, genesis)
}
|
[
"\"LOTUS_SKIP_GENESIS_CHECK\""
] |
[] |
[
"LOTUS_SKIP_GENESIS_CHECK"
] |
[]
|
["LOTUS_SKIP_GENESIS_CHECK"]
|
go
| 1 | 0 | |
example/client/main.go
|
package main
import (
"context"
"database/sql"
"log"
"os"
"time"
"github.com/B3rs/gork/client"
_ "github.com/lib/pq"
)
type IncreaseArgs struct {
IncreaseThis int `json:"increase_this"`
}
type LowerizeArgs struct {
LowerizeThis string `json:"lowerize_this"`
}
func main() {
// normally open a db connection with the standard sql package
db, err := sql.Open("postgres", os.Getenv("POSTGRES_URI"))
if err != nil {
panic(err)
}
// create a client
c := client.NewClient(db)
// schedule a job
err = c.Schedule(
context.Background(),
"increase_1",
"increase",
IncreaseArgs{IncreaseThis: 1234},
)
manageErr(err)
// this job is likely to fail so we can add some retries
err = c.Schedule(
context.Background(),
"increase_2",
"increase",
IncreaseArgs{IncreaseThis: 123},
client.WithMaxRetries(3), // retry 3 times
client.WithRetryInterval(2*time.Second), // wait 2 seconds between retries
)
manageErr(err)
// we can also schedule a job that will be executed in the future
desiredExecutionTime := time.Now().Add(5 * time.Second)
err = c.Schedule(
context.Background(),
"increase_3",
"increase",
IncreaseArgs{IncreaseThis: 456},
client.WithScheduleTime(desiredExecutionTime), // schedule in 5 seconds
)
manageErr(err)
	// we can also schedule another job to run in the future
desiredExecutionTime = time.Now().Add(5 * time.Second)
err = c.Schedule(
context.Background(),
"increase_4",
"increase",
IncreaseArgs{IncreaseThis: 658},
client.WithScheduleTime(desiredExecutionTime), // schedule in 5 seconds
)
manageErr(err)
// and cancel it if we want
err = c.Cancel(context.Background(), "increase_4")
manageErr(err)
// we can schedule different kind of jobs
err = c.Schedule(
context.Background(),
"lowerize_woof",
"lowerize", // by using a different queue
LowerizeArgs{LowerizeThis: "WoOf"}, // With different params
)
manageErr(err)
// we can start a standard sql transaction
tx, err := db.Begin()
manageErr(err)
// and do everything we did before inside a transaction
err = c.WithTx(tx).Schedule(
context.Background(),
"lowerize_meow",
"lowerize",
LowerizeArgs{LowerizeThis: "MeOOOw"},
client.WithMaxRetries(3),
client.WithRetryInterval(1*time.Second),
client.WithScheduleTime(time.Now().Add(2*time.Second)),
)
manageErr(err)
// and commit the transaction atomically when all database operations are done
err = tx.Commit()
manageErr(err)
}
func manageErr(err error) {
if err != nil {
log.Fatal(err)
}
}
|
[
"\"POSTGRES_URI\""
] |
[] |
[
"POSTGRES_URI"
] |
[]
|
["POSTGRES_URI"]
|
go
| 1 | 0 | |
plugins/dsstore/dsstore_test.go
|
/*
Copyright © 2019 The Goca.io team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dsstore
import (
"os"
"strings"
"testing"
"github.com/gocaio/goca"
"github.com/gocaio/goca/gocaTesting"
)
// Test server URL.
var testserver = os.Getenv("GOCA_TEST_SERVER")
// T is a global reference for the test. This allows us to use *testing.T
// methods anywhere
var T *testing.T
// TestReadDSSTORE tests the read on DS_Store files
func TestReadDSSTORE(t *testing.T) {
	T = t // Assign t (*testing.T) to the global T variable
// Get a controller
ctrl := goca.NewControllerTest()
	// Subscribe processOutput. The proper test logic will be placed in processOutput
ctrl.Subscribe(goca.Topics["NewOutput"], processOutput)
// Call the plugin entrypoint
setup(ctrl)
gocatesting.GetAssets(t, ctrl, testserver, plugName)
}
func processOutput(module, url string, out *goca.Output) {
// We have to validate goca.Output according to the resource
parts := strings.Split(out.Target, "/")
switch parts[len(parts)-1] {
case "DS_Store_1":
validateCaseA(out)
case "DS_Store_2":
validateCaseB(out)
case "DS_Store_3":
validateCaseC(out)
case "DS_Store_bad":
validateCaseD(out)
}
}
func validateCaseA(out *goca.Output) {
if out.MainType != "DSSTORE" {
T.Errorf("expected DSSTORE but found %s", out.MainType)
}
if out.Keywords != "Classification Template, Classification Template, Classification Template, Classification Template, Classification Template, Data Preprocessing Template, Data Preprocessing Template, Data Preprocessing Template, Data Preprocessing Template, Data Preprocessing Template" {
T.Errorf("expected other value but found %s", out.CreateDate)
}
}
func validateCaseB(out *goca.Output) {
if out.MainType != "DSSTORE" {
T.Errorf("expected DSSTORE but found %s", out.MainType)
}
if out.Keywords != "bam, bar, baz" {
T.Errorf("expected \"bam, bar, baz\" but found %s", out.CreateDate)
}
}
func validateCaseC(out *goca.Output) {
if out.MainType != "DSSTORE" {
T.Errorf("expected DSSTORE but found %s", out.MainType)
}
if out.Keywords != "., ., ., ., ., ., Pelis, Pelis, Pelis, Pelis, Pelis, Pelis, Pelis, Series, Series, Series, Series, Series, Series, Series, Series, Series, Series, Software, Software, Software, Software, Software, Software, Software, Software, Software" {
T.Errorf("expected other value but found %s", out.CreateDate)
}
}
func validateCaseD(out *goca.Output) {
if out.MainType != "DSSTORE" {
T.Errorf("expected DSSTORE but found %s", out.MainType)
}
if out.Keywords != "" {
T.Errorf("expected \"\" but found %s", out.CreateDate)
}
}
|
[
"\"GOCA_TEST_SERVER\""
] |
[] |
[
"GOCA_TEST_SERVER"
] |
[]
|
["GOCA_TEST_SERVER"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"net/http"
"os"
"github.com/poximy/url-shortener-backend/api"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
)
func main() {
r := chi.NewRouter()
r.Use(middleware.Logger)
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"https://*", "http://*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "application/json"},
}))
r.Mount("/", api.UrlRouter())
err := http.ListenAndServe(port(), r)
if err != nil {
panic(err)
}
}
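// port returns the listen address from the PORT environment variable,
// defaulting to ":8080"; e.g. PORT=3000 makes the server listen on :3000.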
func port() string {
portNum := os.Getenv("PORT")
if portNum == "" {
portNum = "8080" // Default port if not specified
}
return ":" + portNum
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
driver/cmd/mount.go
|
// Copyright © 2018 munzli <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"os"
"flag"
"errors"
"os/exec"
"encoding/json"
"path/filepath"
"github.com/spf13/cobra"
"github.com/monostream/k8s-localflex-provisioner/driver/helper"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/kubelet/apis"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var MountDir string
// mountCmd represents the mount command
var mountCmd = &cobra.Command{
Use: "mount",
Short: "Creates a directory",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return errors.New("requires at least 2 args")
}
return nil
},
Long: `Creates a directory`,
Run: func(cmd *cobra.Command, args []string) {
var path string
var name string
var affinity string
var _ string
// get the json options
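		// args[1] carries the flexvolume options as JSON, e.g. (illustrative
		// values): {"path":"/data/vol1","name":"pv-vol1","affinity":"yes"}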
var options interface{}
		if err := json.Unmarshal([]byte(args[1]), &options); err != nil {
			helper.Handle(helper.Response{
				Status:  helper.StatusFailure,
				Message: "parse options: " + err.Error(),
			})
			return
		}
mappedOptions := options.(map[string]interface{})
for k, v := range mappedOptions {
switch k {
case "path":
path = v.(string)
case "name":
name = v.(string)
case "affinity":
affinity = v.(string)
case "directory":
_ = v.(string)
}
}
// if the source directory doesn't exist, create it
if _, err := os.Stat(path); os.IsNotExist(err) {
errDir := os.Mkdir(path, 0755)
if errDir != nil {
helper.Handle(helper.Response{
Status: helper.StatusFailure,
Message: "make source directory: " + errDir.Error(),
})
return
}
}
// if the target directory doesn't exist, create it
if _, err := os.Stat(args[0]); os.IsNotExist(err) {
errDir := os.Mkdir(args[0], 0755)
if errDir != nil {
helper.Handle(helper.Response{
Status: helper.StatusFailure,
Message: "make target directory: " + errDir.Error(),
})
return
}
}
// create bind mount
errLink := bindMount(path, args[0])
if errLink != nil {
helper.Handle(helper.Response{
Status: helper.StatusFailure,
Message: "create mount: " + errLink.Error(),
})
return
}
// update PV if affinity is set
if affinity != "no" {
err := updatePersistentVolume(name)
if err != nil {
helper.Handle(helper.Response{
Status: helper.StatusFailure,
Message: err.Error(),
})
return
}
}
helper.Handle(helper.Response{
Status: helper.StatusSuccess,
Message: "successfully created the volume",
})
},
}
func init() {
rootCmd.AddCommand(mountCmd)
}
func updatePersistentVolume(name string) error {
// out of cluster config
var kubeconfig *string
if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "/root/.kube/config", "absolute path to the kubeconfig file")
}
flag.Parse()
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
return errors.New("build config: " + *kubeconfig + ": " + err.Error())
}
// create the clientset
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return errors.New("clientset: " + err.Error())
}
nodeName, err := os.Hostname()
if nodeName == "" {
return errors.New("hostname: " + err.Error())
}
volumesClient := clientSet.CoreV1().PersistentVolumes()
pv, err := volumesClient.Get(name, metav1.GetOptions{})
if err != nil {
return errors.New("get pv: " + err.Error())
}
// update affinity annotation
affinity := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: apis.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeName},
},
},
},
},
},
}
affinityJson, err := json.Marshal(*affinity)
if err != nil {
return errors.New("marshall annotation: " + err.Error())
}
annotations := pv.GetAnnotations()
annotations[v1.AlphaStorageNodeAffinityAnnotation] = string(affinityJson)
pv.SetAnnotations(annotations)
_, pvErr := volumesClient.Update(pv)
if pvErr != nil {
return errors.New("update pv: " + pvErr.Error())
}
// everything worked
return nil
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func bindMount(source string, target string) error {
mountCmd := "/bin/mount"
cmd := exec.Command(mountCmd, "--bind", source, target)
output, err := cmd.CombinedOutput()
if err != nil {
return errors.New(string(output[:]))
}
return nil
}
|
[
"\"HOME\"",
"\"USERPROFILE\""
] |
[] |
[
"HOME",
"USERPROFILE"
] |
[]
|
["HOME", "USERPROFILE"]
|
go
| 2 | 0 | |
reinforcement/tensorflow/minigo/loop_train_eval.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper scripts to ensure that main.py commands are called correctly."""
import argh
import argparse
import cloud_logging
import logging
import os
import main
import shipname
import sys
import time
import shutil
import dual_net
import preprocessing
import numpy
import random
import glob
from utils import timer
from tensorflow import gfile
import tensorflow as tf
import goparams
import predict_games
import qmeas
from mlperf_compliance import mlperf_log
# Pull in environment variables. Run `source ./cluster/common` to set these.
#BUCKET_NAME = os.environ['BUCKET_NAME']
#BASE_DIR = "gs://{}".format(BUCKET_NAME)
BASE_DIR = goparams.BASE_DIR
MODELS_DIR = os.path.join(BASE_DIR, 'models')
SELFPLAY_DIR = os.path.join(BASE_DIR, 'data/selfplay')
BURY_DIR = os.path.join(BASE_DIR, 'bury_models')
BURY_SELFPLAY_DIR = os.path.join(BASE_DIR, 'bury_selfplay')
HOLDOUT_DIR = os.path.join(BASE_DIR, 'data/holdout')
SGF_DIR = os.path.join(BASE_DIR, 'sgf')
TRAINING_CHUNK_DIR = os.path.join(BASE_DIR, 'data', 'training_chunks')
ESTIMATOR_WORKING_DIR = os.path.join(BASE_DIR, 'estimator_working_dir')
# How many games before the selfplay workers will stop trying to play more.
MAX_GAMES_PER_GENERATION = goparams.MAX_GAMES_PER_GENERATION
# What percent of games to holdout from training per generation
HOLDOUT_PCT = goparams.HOLDOUT_PCT
def print_flags():
flags = {
#'BUCKET_NAME': BUCKET_NAME,
'BASE_DIR': BASE_DIR,
'MODELS_DIR': MODELS_DIR,
'SELFPLAY_DIR': SELFPLAY_DIR,
'HOLDOUT_DIR': HOLDOUT_DIR,
'SGF_DIR': SGF_DIR,
'TRAINING_CHUNK_DIR': TRAINING_CHUNK_DIR,
'ESTIMATOR_WORKING_DIR': ESTIMATOR_WORKING_DIR,
}
print("Computed variables are:")
print('\n'.join('--{}={}'.format(flag, value)
for flag, value in flags.items()))
def get_models():
"""Finds all models, returning a list of model number and names
sorted increasing.
Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc]
"""
all_models = gfile.Glob(os.path.join(MODELS_DIR, '*.meta'))
model_filenames = [os.path.basename(m) for m in all_models]
model_numbers_names = sorted([
(shipname.detect_model_num(m), shipname.detect_model_name(m))
for m in model_filenames])
return model_numbers_names
def get_latest_model():
"""Finds the latest model, returning its model number and name
Returns: (17, 000017-modelname)
"""
models = get_models()
if len(models) == 0:
models = [(0, '000000-bootstrap')]
return models[-1]
def get_model(model_num):
models = {k: v for k, v in get_models()}
if model_num not in models:
raise ValueError("Model {} not found!".format(model_num))
return models[model_num]
def evaluate(prev_model, cur_model, readouts=200, verbose=1, resign_threshold=0.95):
''' returns True if cur model should be used in future games '''
prev_model_save_path = os.path.join(MODELS_DIR, prev_model)
cur_model_save_path = os.path.join(MODELS_DIR, cur_model)
game_output_dir = os.path.join(SELFPLAY_DIR, cur_model)
game_holdout_dir = os.path.join(HOLDOUT_DIR, cur_model)
sgf_dir = os.path.join(SGF_DIR, cur_model)
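# NOTE: game_holdout_dir and sgf_dir are computed here but not passed to evaluate_evenly below.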
cur_win_pct = main.evaluate_evenly(prev_model_save_path, cur_model_save_path, game_output_dir, readouts=readouts, games=goparams.EVAL_GAMES_PER_SIDE)
print('Evaluate Win Pct = ', cur_win_pct)
qmeas.record('evaluate_win_pct', cur_win_pct)
keep = False
if cur_win_pct >= goparams.EVAL_WIN_PCT_FOR_NEW_MODEL:
qmeas.record('evaluate_choice', 'new')
keep = True
else:
qmeas.record('evaluate_choice', 'old')
keep = False
qmeas.record('eval_summary', {'win_pct': cur_win_pct, 'model': cur_model, 'keep': keep})
return keep
def gather():
print("Gathering game output...")
main.gather(input_directory=SELFPLAY_DIR,
output_directory=TRAINING_CHUNK_DIR)
def train():
model_num, model_name = get_latest_model()
print("Training on gathered game data, initializing from {}".format(model_name))
new_model_name = shipname.generate(model_num + 1)
print("New model will be {}".format(new_model_name))
load_file = os.path.join(MODELS_DIR, model_name)
save_file = os.path.join(MODELS_DIR, new_model_name)
#try:
main.train(ESTIMATOR_WORKING_DIR, TRAINING_CHUNK_DIR, save_file,
generation_num=model_num + 1)
#except:
# print("Got an error training, muddling on...")
# logging.exception("Train error")
return new_model_name
def bury_latest_model():
main._ensure_dir_exists(BURY_DIR)
main._ensure_dir_exists(BURY_SELFPLAY_DIR)
model_num, model_name = get_latest_model()
save_file = os.path.join(MODELS_DIR, model_name)
cmd = 'mv {}* {}/'.format(save_file, BURY_DIR)
# move the model's checkpoint files into the bury directory
print('Bury CMD: ', cmd)
if os.system(cmd) != 0:
raise Exception('Failed to bury model: ' + cmd)
cmd = 'mv {}* {}/'.format(os.path.join(SELFPLAY_DIR, model_name), BURY_SELFPLAY_DIR)
# move any selfplay games from that model too
print('Bury Games CMD: ', cmd)
if os.system(cmd) != 0:
raise Exception('Failed to bury model: ' + cmd)
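# After burying, snapshot the (now latest) previous model under the buried
# model's generation number as '<num>-continue' so the loop can resume from it.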
prev_num, prev_model_name = get_latest_model()
prev_save_file = os.path.join(MODELS_DIR, prev_model_name)
suffixes = ['.data-00000-of-00001', '.index', '.meta']
new_name = '{:06d}-continue'.format(model_num)
new_save_file = os.path.join(MODELS_DIR, new_name)
for suffix in suffixes:
cmd = 'cp {} {}'.format(prev_save_file + suffix, new_save_file + suffix)
print('DEBUG ', cmd)
if os.system(cmd) != 0:
raise Exception('Failed to copy: ' + cmd)
def validate(model_num=None, validate_name=None):
""" Runs validate on the directories up to the most recent model, or up to
(but not including) the model specified by `model_num`
"""
if model_num is None:
model_num, model_name = get_latest_model()
else:
model_num = int(model_num)
model_name = get_model(model_num)
# Model N was trained on games up through model N-2, so the validation set
# should only be for models through N-2 as well, thus the (model_num - 1)
# term.
models = list(
filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
# Run on the most recent 50 generations,
# TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
for pair in models[-50:]]
main.validate(ESTIMATOR_WORKING_DIR, *holdout_dirs,
checkpoint_name=os.path.join(MODELS_DIR, model_name),
validate_name=validate_name)
def echo():
pass  # Flags are echoed in the __main__ block below.
def rl_loop():
"""Run the reinforcement learning loop
This tries to create a realistic way to run the reinforcement learning with
all default parameters.
"""
if goparams.DUMMY_MODEL:
# monkeypatch the hyperparams so that we get a quickly executing network.
dual_net.get_default_hyperparams = lambda **kwargs: {
'k': 8, 'fc_width': 16, 'num_shared_layers': 1, 'l2_strength': 1e-4, 'momentum': 0.9}
dual_net.TRAIN_BATCH_SIZE = 16
dual_net.EXAMPLES_PER_GENERATION = 64
# monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
preprocessing.SHUFFLE_BUFFER_SIZE = 1000
qmeas.stop_time('selfplay_wait')
print("Gathering game output...")
gather()
print("Training on gathered game data...")
_, model_name = get_latest_model()
new_model = train()
if goparams.EVALUATE_PUZZLES:
qmeas.start_time('puzzle')
new_model_path = os.path.join(MODELS_DIR, new_model)
sgf_files = [
'./benchmark_sgf/9x9_pro_YKSH.sgf',
'./benchmark_sgf/9x9_pro_IYMD.sgf',
'./benchmark_sgf/9x9_pro_YSIY.sgf',
'./benchmark_sgf/9x9_pro_IYHN.sgf',
]
result, total_pct = predict_games.report_for_puzzles(new_model_path, sgf_files, 2, tries_per_move=1)
print('accuracy = ', total_pct)
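# NOTE: 'iteration' is a module-level global assigned in the __main__ block below.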
mlperf_log.minigo_print(key=mlperf_log.EVAL_ACCURACY,
value={"epoch": iteration, "value": total_pct})
mlperf_log.minigo_print(key=mlperf_log.EVAL_TARGET,
value=goparams.TERMINATION_ACCURACY)
qmeas.record('puzzle_total', total_pct)
qmeas.record('puzzle_result', repr(result))
qmeas.record('puzzle_summary', {'results': repr(result), 'total_pct': total_pct, 'model': new_model})
qmeas._flush()
with open(os.path.join(BASE_DIR, new_model + '-puzzles.txt'), 'w') as f:
f.write(repr(result))
f.write('\n' + str(total_pct) + '\n')
qmeas.stop_time('puzzle')
if total_pct >= goparams.TERMINATION_ACCURACY:
print('Reaching termination accuracy; ', goparams.TERMINATION_ACCURACY)
mlperf_log.minigo_print(key=mlperf_log.RUN_STOP,
value={"success": True})
with open('TERMINATE_FLAG', 'w') as f:
f.write(repr(result))
f.write('\n' + str(total_pct) + '\n')
if goparams.EVALUATE_MODELS:
if not evaluate(model_name, new_model):
bury_latest_model()
if __name__ == '__main__':
#tf.logging.set_verbosity(tf.logging.INFO)
seed = int(sys.argv[1])
iteration = int(sys.argv[2])
print('Setting random seed, iteration = ', seed, iteration)
seed = hash(seed) + iteration
print("training seed: ", seed)
random.seed(seed)
tf.set_random_seed(seed)
numpy.random.seed(seed)
qmeas.start(os.path.join(BASE_DIR, 'stats'))
# get TF logger
log = logging.getLogger('tensorflow')
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create file handler which logs even debug messages
fh = logging.FileHandler('tensorflow.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
rl_loop()
qmeas.end()
mlperf_log.minigo_print(key=mlperf_log.EVAL_STOP, value=iteration)
|
[] |
[] |
[
"BUCKET_NAME"
] |
[]
|
["BUCKET_NAME"]
|
python
| 1 | 0 | |
test/IntegrationTests.py
|
from __future__ import absolute_import
import multiprocessing
import os
import platform
import threading
import time
import unittest
import percy
import flask
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class IntegrationTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(IntegrationTests, cls).setUpClass()
options = Options()
if 'DASH_TEST_CHROMEPATH' in os.environ:
options.binary_location = os.environ['DASH_TEST_CHROMEPATH']
cls.driver = webdriver.Chrome(chrome_options=options)
loader = percy.ResourceLoader(
webdriver=cls.driver,
base_url='/assets',
root_dir='test/assets'
)
cls.percy_runner = percy.Runner(loader=loader)
cls.percy_runner.initialize_build()
@classmethod
def tearDownClass(cls):
super(IntegrationTests, cls).tearDownClass()
cls.driver.quit()
cls.percy_runner.finalize_build()
def setUp(self):
pass
def tearDown(self):
if platform.system() == 'Windows':
requests.get('http://localhost:8050/stop')
else:
self.server_process.terminate()
self.driver.back()
time.sleep(1)
def startServer(self, app):
"""
:param app:
:type app: dash.Dash
:return:
"""
if 'DASH_TEST_PROCESSES' in os.environ:
processes = int(os.environ['DASH_TEST_PROCESSES'])
else:
processes = 4
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
app.run_server(
port=8050,
debug=False,
processes=processes,
threaded=False,
)
def run_windows():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
@app.server.route('/stop')
def _stop_server_windows():
stopper = flask.request.environ['werkzeug.server.shutdown']
stopper()
return 'stop'
app.run_server(
port=8050,
debug=False,
threaded=True
)
# Run on a separate process so that it doesn't block
system = platform.system()
if system == 'Windows':
# multiprocessing can't pickle an inner function on Windows (closures are not serializable there by default)
self.server_thread = threading.Thread(target=run_windows)
self.server_thread.start()
else:
self.server_process = multiprocessing.Process(target=run)
self.server_process.start()
time.sleep(2)
# Visit the dash page
self.driver.get('http://localhost:8050')
# Inject an error and warning logger
logger = '''
window.tests = {};
window.tests.console = {error: [], warn: [], log: []};
var _log = console.log;
var _warn = console.warn;
var _error = console.error;
console.log = function() {
window.tests.console.log.push({method: 'log', arguments: arguments});
return _log.apply(console, arguments);
};
console.warn = function() {
window.tests.console.warn.push({method: 'warn', arguments: arguments});
return _warn.apply(console, arguments);
};
console.error = function() {
window.tests.console.error.push({method: 'error', arguments: arguments});
return _error.apply(console, arguments);
};
'''
self.driver.execute_script(logger)
|
[] |
[] |
[
"DASH_TEST_PROCESSES",
"DASH_TEST_CHROMEPATH"
] |
[]
|
["DASH_TEST_PROCESSES", "DASH_TEST_CHROMEPATH"]
|
python
| 2 | 0 | |
tests/integration/github_test.go
|
// Copyright 2014 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// These tests call the live GitHub API, and therefore require a little more
// setup to run. See https://github.com/google/go-github/tree/master/tests/integration
// for more information
package tests
import (
"fmt"
"os"
"code.google.com/p/goauth2/oauth"
"github.com/google/go-github/github"
)
var (
client *github.Client
// auth indicates whether tests are being run with an OAuth token.
// Tests can use this flag to skip certain tests when run without auth.
auth bool
)
func init() {
token := os.Getenv("GITHUB_AUTH_TOKEN")
if token == "" {
println("!!! No OAuth token. Some tests won't run. !!!\n")
client = github.NewClient(nil)
} else {
t := &oauth.Transport{
Token: &oauth.Token{AccessToken: token},
}
client = github.NewClient(t.Client())
auth = true
}
}
func checkAuth(name string) bool {
if !auth {
fmt.Printf("No auth - skipping portions of %v\n", name)
}
return auth
}
|
[
"\"GITHUB_AUTH_TOKEN\""
] |
[] |
[
"GITHUB_AUTH_TOKEN"
] |
[]
|
["GITHUB_AUTH_TOKEN"]
|
go
| 1 | 0 | |
tests/previs/test_sequence_manager_extension.py
|
# -*- coding: utf-8 -*-
import unittest
import os
# prepare for test
os.environ['ANIMA_TEST_SETUP'] = ""
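# ANIMA_TEST_SETUP must be set before the mayaEnv import below, which presumably reads it at import time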
from anima.env import mayaEnv # to setup maya extensions
import pymel.core
from anima.edit import Sequence, Media, Video, Track, Clip, File
class SequenceManagerTestCase(unittest.TestCase):
"""tests the SequenceManagerExtension class
"""
def setUp(self):
"""set up the test
"""
# create a new scene and get the sequenceManager in the scene
pymel.core.newFile(force=True)
self.sm = pymel.core.PyNode('sequenceManager1')
def test_from_xml_path_argument_skipped(self):
"""testing if a TypeError will be raised when the path argument is
skipped
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml()
self.assertEqual(
cm.exception.message,
'from_xml() takes exactly 2 arguments (1 given)'
)
def test_from_xml_path_argument_is_not_a_string(self):
"""testing if a TypeError will be raised when the path argument is not
a string
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml(30)
self.assertEqual(
cm.exception.message,
'path argument in SequenceManager.from_xml should be a string, '
'not int'
)
def test_from_xml_path_argument_is_not_a_valid_path(self):
"""testing if a IOError will be raised when the path argument is not
a valid path
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(IOError) as cm:
sm.from_xml('not a valid path')
self.assertEqual(
cm.exception.message,
'Please supply a valid path to an XML file!'
)
def test_from_xml_generates_correct_sequencer_hierarchy(self):
"""testing if from_xml method will generate Sequences and shots
correctly
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
sequences = sm.sequences.get()
self.assertEqual(len(sequences), 1)
sequencer = sequences[0]
self.assertIsInstance(sequencer, pymel.core.nt.Sequencer)
self.assertEqual(sequencer.duration, 111)
self.assertEqual(sequencer.sequence_name.get(), 'SEQ001_HSNI_003')
# check scene fps
self.assertEqual(pymel.core.currentUnit(q=1, t=1), 'film')
# check timecode
time = pymel.core.PyNode('time1')
self.assertEqual(time.timecodeProductionStart.get(), 0.0)
shots = sequencer.shots.get()
self.assertEqual(len(shots), 3)
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1024, shot1.wResolution.get())
self.assertEqual(778, shot1.hResolution.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(34.0, shot1.sequenceEndFrame.get())
self.assertEqual(34.0, shot1.duration)
self.assertEqual(10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0010_v001.mov',
shot1.output.get()
)
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1024, shot2.wResolution.get())
self.assertEqual(778, shot2.hResolution.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(35.0, shot2.sequenceStartFrame.get())
self.assertEqual(65.0, shot2.sequenceEndFrame.get())
self.assertEqual(31.0, shot2.duration)
self.assertEqual(10.0, shot2.startFrame.get())
self.assertEqual(40.0, shot2.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0020_v001.mov',
shot2.output.get()
)
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1024, shot3.wResolution.get())
self.assertEqual(778, shot3.hResolution.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(66.0, shot3.sequenceStartFrame.get())
self.assertEqual(111.0, shot3.sequenceEndFrame.get())
self.assertEqual(46.0, shot3.duration)
self.assertEqual(10.0, shot3.startFrame.get())
self.assertEqual(55.0, shot3.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0030_v001.mov',
shot3.output.get()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v002.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_xml(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(75.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(64.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(76.0, shot3.sequenceStartFrame.get())
self.assertEqual(131.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_edl method will update Sequences and shots
correctly with the edl file
"""
path = os.path.abspath('./test_data/test_v002.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.edl
sm.from_edl(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_used_more_than_one_times(self):
"""testing if from_edl method will update Sequences and shots correctly
when shots are used more than once
"""
path = os.path.abspath('./test_data/test_v004.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
# set a camera on shot3; the duplicated shot4 created from the EDL should share it
shot3.set_camera('persp')
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v004.edl
sm.from_edl(path)
# check if there are 4 shots
self.assertEqual(4, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
# Clip4
# there should be an extra shot
shot4 = seq.shots.get()[-1]
self.assertEqual('0030', shot4.shotName.get())
self.assertEqual(1, shot4.track.get())
self.assertEqual(133.0, shot4.sequenceStartFrame.get())
self.assertEqual(189.0, shot4.sequenceEndFrame.get())
self.assertEqual(65.0, shot4.startFrame.get())
self.assertEqual(121.0, shot4.endFrame.get())
# check that their cameras are also the same
self.assertEqual(
shot3.get_camera(),
shot4.get_camera()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_removed(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v003.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v003.xml
sm.from_xml(path)
# we should have 2 shots only
self.assertEqual(2, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
# removed
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(55.0, shot3.sequenceStartFrame.get())
self.assertEqual(110.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_to_xml_will_generate_proper_xml_string(self):
"""testing if a proper xml compatible string will be generated with
to_xml() method
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
result = sm.to_xml()
with open(path) as f:
expected = f.read()
self.maxDiff = None
self.assertEqual(expected, result)
def test_create_sequence_is_working_properly(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence()
self.assertEqual(seq.type(), 'sequencer')
self.maxDiff = None
self.assertEqual(self.sm, seq.message.connections()[0])
def test_create_sequence_is_properly_setting_the_sequence_name(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence('Test Sequence')
self.assertEqual(
'Test Sequence',
seq.sequence_name.get()
)
def test_to_edl_is_working_properly(self):
"""testing if to_edl method is working properly
"""
import edl
# create a sequence
seq1 = self.sm.create_sequence('sequence1')
seq1.create_shot('shot1')
seq1.create_shot('shot2')
seq1.create_shot('shot3')
l = self.sm.to_edl()
self.assertIsInstance(
l,
edl.List
)
def test_to_edl_will_generate_a_proper_edl_content(self):
"""testing if to_edl will generate a proper edl content
"""
edl_path = os.path.abspath('./test_data/test_v001.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
l = sm.to_edl()
result = l.to_string()
with open(edl_path) as f:
expected_edl_content = f.read()
self.assertEqual(
expected_edl_content,
result
)
def test_generate_sequence_structure_returns_a_sequence_instance(self):
"""testing if generate_sequence_structure() method will return a
Sequence instance
"""
sm = pymel.core.PyNode('sequenceManager1')
seq1 = sm.create_sequence('sequence1')
shot1 = seq1.create_shot('shot1')
shot1.output.set('/tmp/shot1.mov')
shot2 = seq1.create_shot('shot2')
shot2.output.set('/tmp/shot2.mov')
result = sm.generate_sequence_structure()
self.assertIsInstance(
result,
Sequence
)
def test_generate_sequence_structure_will_generate_sequences_and_shots_with_correct_number_of_tracks(self):
"""testing if a proper sequence structure will be generated by using
the generate_sequence_structure() method with correct number of tracks
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
seq1 = sm.sequences.get()[0]
shots = seq1.shots.get()
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
seq = sm.generate_sequence_structure()
tracks = seq.media.video.tracks
self.assertEqual(len(tracks), 1)
track1 = tracks[0]
clips = track1.clips
self.assertEqual(len(clips), 3)
def test_set_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.shot_name_template.get(), test_template)
def test_get_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.get_shot_name_template(), test_template)
def test_get_shot_name_template_will_create_shot_name_template_attribute_if_missing(self):
"""testing if set_shot_name_template() will create the
shot_name_template attribute if missing
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
result = sm.get_shot_name_template()
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(result, '<Sequence>_<Shot>_<Version>')
def test_set_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.version.get(), test_version)
def test_get_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.get_version(), test_version)
def test_get_version_will_create_attribute_if_missing(self):
"""testing if get_version() will create the missing version attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
result = sm.get_version()
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(result, '')
def test_set_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.task_name.get(), test_task_name)
def test_get_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.get_task_name(), test_task_name)
def test_get_task_name_will_create_attribute_if_missing(self):
"""testing if get_task_name() will create the missing task_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
result = sm.get_task_name()
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(result, '')
def test_set_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.take_name.get(), test_take_name)
def test_get_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.get_take_name(), test_take_name)
def test_get_take_name_will_create_attribute_if_missing(self):
"""testing if get_take_name() will create the missing take_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
result = sm.get_take_name()
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(result, '')
def test_generate_sequence_structure_is_working_properly(self):
"""testing if generate_sequence_structure() method is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
from anima.env import mayaEnv
mayaEnv.Maya.set_fps(fps=24)
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(24)
shot1.sequenceStartFrame.set(0)
shot1.track.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(10)
shot2.endFrame.set(35)
shot2.sequenceStartFrame.set(25)
shot2.track.set(1)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(15)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(25)
shot3.endFrame.set(50)
shot3.sequenceStartFrame.set(45)
shot3.track.set(2)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(20)
seq = sm.generate_sequence_structure()
self.assertIsInstance(seq, Sequence)
rate = seq.rate
self.assertEqual('24', rate.timebase)
self.assertEqual(False, rate.ntsc)
self.assertEqual('00:00:00:00', seq.timecode)
self.assertEqual(False, seq.ntsc)
media = seq.media
self.assertIsInstance(media, Media)
video = media.video
self.assertIsInstance(video, Video)
self.assertIsNone(media.audio)
self.assertEqual(2, len(video.tracks))
track1 = video.tracks[0]
self.assertIsInstance(track1, Track)
self.assertEqual(len(track1.clips), 2)
self.assertEqual(track1.enabled, True)
track2 = video.tracks[1]
self.assertIsInstance(track2, Track)
self.assertEqual(len(track2.clips), 1)
self.assertEqual(track2.enabled, True)
clip1 = track1.clips[0]
self.assertIsInstance(clip1, Clip)
self.assertEqual('Video', clip1.type)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.id)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.name)
self.assertEqual(10, clip1.in_) # handle
self.assertEqual(35, clip1.out) # handle + duration
self.assertEqual(0, clip1.start) # sequenceStartFrame
self.assertEqual(25, clip1.end) # sequenceEndFrame + 1
clip2 = track1.clips[1]
self.assertIsInstance(clip2, Clip)
self.assertEqual('Video', clip2.type)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.id)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.name)
self.assertEqual(15, clip2.in_) # handle
self.assertEqual(41, clip2.out) # handle + duration
self.assertEqual(25, clip2.start) # sequenceStartFrame
self.assertEqual(51, clip2.end) # sequenceEndFrame + 1
clip3 = track2.clips[0]
self.assertIsInstance(clip3, Clip)
self.assertEqual('Video', clip3.type)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.id)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.name)
self.assertEqual(20, clip3.in_) # startFrame
self.assertEqual(46, clip3.out) # endFrame + 1
self.assertEqual(45, clip3.start) # sequenceStartFrame
self.assertEqual(71, clip3.end) # sequenceEndFrame + 1
file1 = clip1.file
self.assertIsInstance(file1, File)
self.assertEqual('SEQ001_HSNI_003_0010_v001', file1.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0010_v001.mov',
file1.pathurl)
self.assertEqual(45, file1.duration) # including handles
file2 = clip2.file
self.assertIsInstance(file2, File)
self.assertEqual('SEQ001_HSNI_003_0020_v001', file2.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0020_v001.mov',
file2.pathurl)
self.assertEqual(56, file2.duration) # including handles
file3 = clip3.file
self.assertIsInstance(file3, File)
self.assertEqual('SEQ001_HSNI_003_0030_v001', file3.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0030_v001.mov',
file3.pathurl)
self.assertEqual(66, file3.duration) # including handles
|
[] |
[] |
[
"ANIMA_TEST_SETUP"
] |
[]
|
["ANIMA_TEST_SETUP"]
|
python
| 1 | 0 | |
qqbot/core/util/logging.py
|
# -*- coding: utf-8 -*-
import logging
import os
import platform
from logging import FileHandler
from logging.handlers import TimedRotatingFileHandler
LOG_COLORS_CONFIG = {
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
}
def _getLevel():
level = logging.INFO
level_str = os.getenv("QQBOT_LOG_LEVEL", str(logging.INFO))
try:
level = int(level_str)
if level not in (
logging.NOTSET,
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
):
logging.error("wrong logging level %s" % level_str)
level = logging.INFO
except ValueError:
logging.error("wrong logging level %s" % level_str)
logging.info("logging level: %d" % level)
return level
def getLogger(name=None):
print_format = (
"%(asctime)s - "
"\033[1;33m%(levelname)s: %(name)s - %(filename)s - %(funcName)s(line: %(lineno)s):\033[0m%(message)s"
""
)
file_format = "%(asctime)s-%(name)s - %(filename)s - %(funcName)s - line %(lineno)s-%(levelname)s - %(message)s"
if name is None:
logger = logging.getLogger("qqbot")
else:
logger = logging.getLogger(name)
logging.basicConfig(format=print_format)
logger.setLevel(level=_getLevel())
# FileHandler
no_log = os.getenv("QQBOT_DISABLE_LOG", "0")
if no_log == "0":
formatter = logging.Formatter(file_format)
if name is None:
name = "qqbot"
log_file = os.path.join(os.getcwd(), name + ".log")
file_handler = None
if platform.system().lower() != "windows":
# do not use RotatingFileHandler under Windows
# due to multi-process issue
# file_handler = RotatingFileHandler(
# log_file,
# maxBytes=1024 * 1024,
# backupCount=5,
# )
# save last 7 days log
file_handler = TimedRotatingFileHandler(
filename=log_file,
when="D",
backupCount=7,
)
else:
file_handler = FileHandler(log_file, encoding="utf-8")
logger.debug(
"qqbot: dumping log file to {path}".format(path=os.path.realpath(log_file))
)
file_handler.setLevel(level=_getLevel())
file_handler.setFormatter(formatter)
if len(logger.handlers) == 0:
logger.addHandler(file_handler)
return logger
|
[] |
[] |
[
"QQBOT_DISABLE_LOG",
"QQBOT_LOG_LEVEL"
] |
[]
|
["QQBOT_DISABLE_LOG", "QQBOT_LOG_LEVEL"]
|
python
| 2 | 0 | |
modules/dbnd/test_dbnd/tracking/managers/test_subdag_runtime.py
|
import os
import mock
from pytest import fixture
import dbnd._core.configuration.environ_config
from dbnd import dbnd_tracking_stop, log_metric, task
from dbnd._core.current import try_get_databand_run
PARENT_DAG = "parent_dag"
CHILD_DAG = "child_dag"
FULL_DAG_NAME = "%s.%s" % (PARENT_DAG, CHILD_DAG)
@task
def fake_task_inside_dag():
log_metric("Testing", "Metric")
run = try_get_databand_run()
assert run is not None, "Task should run in databand run, check airflow tracking!"
root_task = run.root_task
# Validate regular subdag properties
assert run.job_name == "%s.%s.fake_task_inside_dag" % (PARENT_DAG, CHILD_DAG)
# this test got problematic because the airflow_inplace task is named after the script that ran it
assert root_task.task_name
return "Regular test"
patch_dict = {
"AIRFLOW_CTX_DAG_ID": FULL_DAG_NAME,
"AIRFLOW_CTX_TASK_ID": fake_task_inside_dag.__name__,
"AIRFLOW_CTX_EXECUTION_DATE": "2020-04-06T14:25:00",
}
@fixture
def with_airflow_tracking_env():
dbnd._core.configuration.environ_config.reset_dbnd_project_config()
try:
with mock.patch.dict(os.environ, patch_dict):
yield
finally:
dbnd._core.configuration.environ_config.reset_dbnd_project_config()
class TestTaskInplaceRun(object):
def test_sanity_with_airflow(self, with_airflow_tracking_env):
fake_task_inside_dag()
dbnd_tracking_stop()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
interesting.py
|
import os
from functools import lru_cache
from typing import Dict, List
from urllib.parse import urljoin
import requests
from praw.models import Submission
from prawutils import get_reddit
reddit = get_reddit()
interest_cache_path = 'cached_interest.sqlite'
HF_INFERENCE_URL = os.getenv('HF_INFERENCE_URL', 'http://vpn.tchen.xyz:33960/')
@lru_cache(maxsize=1000000)
def get_interest(submission: Submission) -> float:
body = f'''Title: {submission.title}
Subreddit: {submission.subreddit.display_name}
{submission.selftext}'''
if len(submission.selftext) > 0 and submission.num_comments > 0:
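# append the bodies of up to ten top-level comments for extra context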
for i in range(min(len(submission.comments), 10)):
body += f'\n{submission.comments[i].body}'
url = urljoin(HF_INFERENCE_URL, 'gpt2loss')
return float(requests.post(url, json=body[:1000]).json()['loss'])
def get_interests(submissions: List[Submission]) -> Dict[str, float]:
return {
submission.id: get_interest(submission)
for submission in submissions
}
|
[] |
[] |
[
"HF_INFERENCE_URL"
] |
[]
|
["HF_INFERENCE_URL"]
|
python
| 1 | 0 | |
tests/test_verifone.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `verifone` package."""
import unittest
import logging
import os
from datetime import datetime
from verifone import verifone
try:
import http.client as http_client
except ImportError:
import httplib as http_client
# logging
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
class TestVerifone(unittest.TestCase):
""" Test for Verifone package. """
@classmethod
def setUpClass(cls):
""" Set up our Verifone client for tests. It requires the following environment variables: AGREEMENTCODE, RSAPRIVATEKEY and EMAIL """
cls._verifone_client = verifone.Verifone(os.environ.get('AGREEMENTCODE'), os.environ.get('RSAPRIVATEKEY'), os.environ.get('RSAVERIFONEPUBLICKEY'), "IntegrationTest", "6.0.37")
cls._test_requests = os.environ.get('TESTSENDINGREQUEST')
cls._verifone_client_2 = verifone.Verifone(os.environ.get('AGREEMENTCODE'), os.environ.get('RSAPRIVATEKEY'), os.environ.get('RSAVERIFONEPUBLICKEY'), "IntegrationTest", "6.0.37", return_error_dict=1)
def test_001_create_object_with_defaults(self):
""" Test creating a new object with default values """
self.assertTrue(self._verifone_client._currency == "EUR")
self.assertTrue(self._verifone_client._test_mode == 0)
def test_002_get_endpoint(self):
""" Test to get endpoint url and change to test mode """
self.assertEqual(self._verifone_client.endpoint, 'https://epayment1.point.fi/pw/serverinterface')
self._verifone_client._test_mode = 1
self.assertEqual(self._verifone_client.endpoint, 'https://epayment.test.point.fi/pw/serverinterface')
self.assertTrue(self._verifone_client._test_mode == 1)
def test_003_create_object_wrong_currency(self):
""" Test creating a new object with wrong currency, so default currency should be used """
default_currency = 'EUR'
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'euro')
self.assertEqual(verifone_cl._currency, default_currency)
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'eu1')
self.assertEqual(verifone_cl._currency, default_currency)
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', '€')
self.assertEqual(verifone_cl._currency, default_currency)
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'abc')
self.assertNotEqual(verifone_cl._currency, default_currency)
def test_004_create_object_currency_lower(self):
""" Test creating a new object with currency in lower case """
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'eur')
self.assertEqual(verifone_cl._currency, 'EUR')
def test_005_update_currency(self):
""" Test that currency is updated """
new_value = "SEK"
self._verifone_client.currency = new_value
self.assertEqual(self._verifone_client._currency, new_value)
with self.assertRaises(ValueError):
self._verifone_client.currency = "Euro"
with self.assertRaises(ValueError):
self._verifone_client.currency = ""
with self.assertRaises(ValueError):
self._verifone_client.currency = "kr3"
with self.assertRaises(ValueError):
self._verifone_client.currency = "€"
new_value = "abc"
self._verifone_client.currency = new_value
self.assertEqual(self._verifone_client._currency, new_value.upper())
self._verifone_client._currency = "abc"
with self.assertRaises(ValueError):
self._verifone_client.currency
new_value = "eur"
self._verifone_client.currency = new_value
self.assertEqual(self._verifone_client._currency, new_value.upper())
self.assertEqual(self._verifone_client.currency, '978')
def test_006_is_available(self):
""" Test connection to Verifone server """
if (self._test_requests == "1"):
response = self._verifone_client.is_available()
self.assertTrue(response['i-f-1-1_availability'] == '2')
self._verifone_client._test_mode = 1
response = self._verifone_client.is_available()
self.assertTrue(response['i-f-1-1_availability'] == '2')
def test_007_get_payment_methods(self):
""" Test to get all available payment methods """
if (self._test_requests == "1"):
response = self._verifone_client.list_payment_methods()
self.assertIsNotNone(response['s-t-1-30_payment-method-code-0'])
def test_008_list_saved_payment_methods(self):
""" Test to get saved payment methods """
if (self._test_requests == "1"):
params = {
's-f-1-30_buyer-first-name': 'Test',
's-f-1-30_buyer-last-name': 'Tester',
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_buyer-phone-number': '123456789',
's-t-1-255_buyer-external-id': os.environ.get('EXTERNALID'),
}
response = self._verifone_client.list_saved_payment_methods(params)
self.assertTrue('l-t-1-20_payment-method-id-0' in response)
def test_009_remove_saved_payment_method(self):
""" Test to remove saved payment method when saved payment method is wrong """
if (self._test_requests == "1"):
response = self._verifone_client.remove_saved_payment_method(123456)
self.assertEqual(response['l-t-1-10_removed-count'], '0')
def test_010_generate_payment_data(self):
""" Test to generate payment data """
customer_id = '1234567890asdfghjklp-1234567890zxcvbnmklo'
note = "Note"
params = {
'order_number': '58459',
'order_timestamp': '2018-08-02 09:14:12',
'payment_timestamp': '2018-08-02 11:59:16',
'locale': 'fi_FI',
'amount_gross': 1.51,
'amount_net': 1.22,
'vat_amount': 0.29,
'first_name': 'Test',
'last_name': 'Tester',
'email': '[email protected]',
'phone': '1212121212121212',
'address': 'Test Street 4',
'postal_code': 33200,
'city': 'Tampere',
'country': 'fi',
'style': '',
'cancel_url': 'https://cancel.url',
'error_url': 'https://error.url',
'expired_url': 'https://expired.url',
'rejected_url': 'https://rejected.url',
'success_url': 'https://success.url',
'success_url_server': 'https://server.success.url',
'save_method': 3,
'payment_method': 'nordea-e-payment',
'customer_id': customer_id,
'note': note,
'products': [
{
'name': 'er_7142303001',
'pieces': 1,
'discount': 0,
'vat': 24.00,
'amount_gross': 1.51,
'amount_net': 1.22,
'unit_cost_gross': 1.51,
},
],
'dynamic_feedback': 's-t-1-4_error-code,i-t-6-6_card-pan-first6,i-t-4-4_card-pan-last4',
}
data = self._verifone_client.generate_payment_data(params)
self.assertTrue('s-t-256-256_signature-one' in data)
self.assertIsNotNone(data['s-t-256-256_signature-one'])
self.assertTrue('s-t-256-256_signature-two' in data)
self.assertIsNotNone(data['s-t-256-256_signature-two'])
self.assertEqual(data['l-f-1-20_order-gross-amount'], 151)
self.assertEqual(data['l-f-1-20_order-net-amount'], 122)
self.assertEqual(data['l-f-1-20_order-vat-amount'], 29)
self.assertEqual(data['s-t-1-255_buyer-external-id'], customer_id)
self.assertEqual(data['s-t-1-36_order-note'], note)
self.assertIsNotNone(data['s-t-1-1024_dynamic-feedback'])
self.assertEqual(data['s-t-1-30_bi-name-0'], 'er_7142303001')
self.assertEqual(len(data['s-t-1-30_bi-name-0']), 13)
def test_011_generate_payment_data(self):
""" Test to generate payment data when all data is not defined """
params = {
'order_number': '58459',
'locale': 'fi_FI',
'first_name': 'Test',
'last_name': 'Tester',
'email': '[email protected]',
'cancel_url': 'https://cancel.url',
'error_url': 'https://error.url',
'expired_url': 'https://expired.url',
'rejected_url': 'https://rejected.url',
'success_url': 'https://success.url',
'success_url_server': 'https://server.success.url',
'skip_confirmation': 1,
'country': '246',
'products': [
{
'name': 'er_7142303001',
'pieces': 1,
'vat': 24.00,
},
]
}
data = self._verifone_client.generate_payment_data(params)
self.assertTrue('s-t-1-30_style-code' in data)
self.assertTrue('i-t-1-1_skip-confirmation-page' in data)
self.assertEqual(data['i-t-1-1_skip-confirmation-page'], 1)
self.assertEqual(data['i-t-1-3_delivery-address-country-code'], '246')
def test_012_generate_payment_link(self):
""" Test to generate payment link """
if (self._test_requests == "1"):
params = {
'locale-f-2-5_payment-locale': 'fi_FI',
't-f-14-19_order-expiry-timestamp': '2018-10-02 09:14:12',
's-f-1-36_order-number': '1234',
't-f-14-19_order-timestamp': '2018-08-03 04:58:22',
's-t-1-36_order-note': 'Test payment',
'i-f-1-3_order-currency-code': '978',
'l-f-1-20_order-gross-amount': '7602',
'l-f-1-20_order-net-amount': '6131',
'l-f-1-20_order-vat-amount': '1471',
's-t-1-30_payment-method-code': 'visa',
's-t-1-36_payment-link-number': '1234567',
's-f-1-32_payment-link-delivery-mode': 'email',
's-f-1-30_buyer-first-name': "Test",
's-f-1-30_buyer-last-name': "Tester",
's-t-1-30_buyer-phone-number': '1234567890',
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_delivery-address-line-one': "Test Street 3",
's-t-1-30_delivery-address-city': "Tampere",
's-t-1-30_delivery-address-postal-code': "33210",
'i-t-1-3_delivery-address-country-code': 'fi',
's-t-1-30_bi-name-0': 'Test Product', # can be 0-50 items
'l-t-1-20_bi-unit-gross-cost-0': '7602',
'i-t-1-11_bi-unit-count-0': '1',
'l-t-1-20_bi-gross-amount-0': '7602',
'l-t-1-20_bi-net-amount-0': '6131',
'i-t-1-4_bi-vat-percentage-0': '2400',
'i-t-1-4_bi-discount-percentage-0': 0,
}
with self.assertRaises(ValueError):
self._verifone_client.generate_payment_link(params)
result = self._verifone_client_2.generate_payment_link(params)
self.assertTrue('s-f-1-30_error-message' in result)
def test_013_get_payment_link_status(self):
""" Test to get payment link status. """
if (self._test_requests == "1"):
with self.assertRaises(ValueError):
self._verifone_client.get_payment_link_status(12345678)
result = self._verifone_client_2.get_payment_link_status(12345678)
self.assertTrue('s-f-1-30_error-message' in result)
def test_014_reactivate_payment_link(self):
""" Test to reactivate payment link. """
if (self._test_requests == "1"):
current_time = datetime.now()
timestamp = current_time.strftime('%Y-%m-%d %H:%M:%S')
with self.assertRaises(ValueError):
self._verifone_client.reactivate_payment_link(12345678, timestamp)
result = self._verifone_client_2.reactivate_payment_link(12345678, timestamp)
self.assertTrue('s-f-1-30_error-message' in result)
def test_015_process_payment(self):
""" Test to process payment """
if (self._test_requests == "1"):
params = {
's-f-1-30_buyer-first-name': 'Test',
's-f-1-30_buyer-last-name': 'Tester',
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_buyer-phone-number': '123456789',
's-t-1-255_buyer-external-id': os.environ.get('EXTERNALID'),
}
response = self._verifone_client.list_saved_payment_methods(params)
saved_payment_id = response['l-t-1-20_payment-method-id-0']
self.assertIsNotNone(saved_payment_id)
params = {
'locale-f-2-5_payment-locale': 'fi_FI',
's-f-1-36_order-number': '1234',
'l-f-1-20_order-gross-amount': 2391,
's-f-1-30_buyer-first-name': "Test",
's-f-1-30_buyer-last-name': "Tester",
's-t-1-30_buyer-phone-number': 123456789,
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_delivery-address-line-one': "Test Street 3",
's-t-1-30_delivery-address-city': "Tampere",
's-t-1-30_delivery-address-postal-code': "33210",
'i-t-1-3_delivery-address-country-code': 'FI',
's-t-1-30_bi-name-0': 'Test Product',
'l-t-1-20_bi-unit-gross-cost-0': 2391,
'i-t-1-11_bi-unit-count-0': 1,
'l-t-1-20_bi-gross-amount-0': 2391,
'l-t-1-20_bi-net-amount-0': 1928,
'i-t-1-4_bi-vat-percentage-0': 2400,
'i-t-1-4_bi-discount-percentage-0': 0,
's-t-1-255_buyer-external-id': os.environ.get('EXTERNALID'),
'l-t-1-20_saved-payment-method-id': saved_payment_id,
}
response = self._verifone_client.process_payment(params)
self.assertTrue('l-f-1-20_transaction-number' in response)
self.assertIsNotNone(response['l-f-1-20_transaction-number'])
def test_016_list_transaction_numbers(self):
""" Test to get transaction numbers for one order. """
if (self._test_requests == "1"):
response = self._verifone_client.list_transaction_numbers("1234")
self.assertTrue('l-f-1-20_transaction-number-0' in response)
def test_017_get_payment_status(self):
""" Test to get payment status """
if (self._test_requests == "1"):
response = self._verifone_client.list_transaction_numbers("1234")
transaction_id = response['l-f-1-20_transaction-number-0']
self.assertIsNotNone(transaction_id)
params = {
's-f-1-30_payment-method-code': 'visa',
'l-f-1-20_transaction-number': transaction_id,
}
response = self._verifone_client.get_payment_status(params)
self.assertTrue('s-f-1-30_payment-status-code' in response)
def test_018_refund_payment(self):
""" Test to refund payment """
if (self._test_requests == "1"):
response = self._verifone_client.list_transaction_numbers("1234")
transaction_id = response['l-f-1-20_transaction-number-0']
self.assertIsNotNone(transaction_id)
params = {
'l-f-1-20_refund-amount': 1,
's-f-1-30_payment-method-code': 'visa',
'l-f-1-20_transaction-number': transaction_id,
}
response = self._verifone_client.refund_payment(params)
self.assertTrue('l-f-1-20_transaction-number' in response)
def test_019_cancel_payment(self):
""" Test to cancel payment. """
if (self._test_requests == "1"):
params = {
's-f-1-30_payment-method-code': 'visa',
'l-f-1-20_transaction-number': '123456',
}
with self.assertRaises(ValueError):
self._verifone_client.cancel_payment(params)
result = self._verifone_client_2.cancel_payment(params)
self.assertTrue('s-f-1-30_error-message' in result)
def test_020_process_supplementary(self):
""" Test to process supplementary. """
if (self._test_requests == "1"):
params = {
'l-f-1-20_original-transaction-number': '123456',
's-f-1-30_payment-method-code': 'visa',
'l-f-1-20_order-gross-amount': 500,
}
with self.assertRaises(ValueError):
self._verifone_client.process_supplementary(params)
result = self._verifone_client_2.process_supplementary(params)
self.assertTrue('s-f-1-30_error-message' in result)
self.assertEqual(result['s-f-1-30_error-message'], 'invalid-transaction-number')
def test_021_get_endpoint(self):
""" Test for getting endpoints """
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'eur')
self.assertEqual(verifone_cl.endpoint, 'https://epayment1.point.fi/pw/serverinterface')
self.assertEqual(verifone_cl.endpoint2, 'https://epayment2.point.fi/pw/serverinterface')
verifone_cl.test_mode = 1
self.assertEqual(verifone_cl.endpoint, 'https://epayment.test.point.fi/pw/serverinterface')
self.assertEqual(verifone_cl.endpoint2, 'https://epayment.test.point.fi/pw/serverinterface')
def test_022_save_test_mode(self):
""" Test for save test mode """
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'eur')
self.assertEqual(verifone_cl.test_mode, 0)
verifone_cl.test_mode = 1
self.assertEqual(verifone_cl.test_mode, 1)
with self.assertRaises(ValueError):
verifone_cl.test_mode = 3
verifone_cl.test_mode = None
self.assertEqual(verifone_cl.test_mode, 0)
def test_023_get_endpoint(self):
""" Test for getting post urls """
verifone_cl = verifone.Verifone('test_apikey', '1234', 'Test', 'IntegrationTest', '6.0.37', 'eur')
self.assertIsNotNone(verifone_cl.posturl)
self.assertEqual(verifone_cl.posturl1, 'https://epayment1.point.fi/pw/payment')
self.assertEqual(verifone_cl.posturl2, 'https://epayment2.point.fi/pw/payment')
verifone_cl.test_mode = 1
self.assertIsNotNone(verifone_cl.posturl)
self.assertEqual(verifone_cl.posturl1, 'https://epayment.test.point.fi/pw/payment')
self.assertEqual(verifone_cl.posturl2, 'https://epayment.test.point.fi/pw/payment')
def test_024_build_product_data(self):
""" Test building product data """
params = [{
'name': 'er_7142303001',
'pieces': 1,
'discount': 10,
'vat': 24.00,
'amount_net': 1.22,
'unit_cost_gross': 1.51,
}]
response = self._verifone_client.build_product_data(params)
self.assertTrue('l-t-1-20_bi-unit-gross-cost-0' in response)
self.assertTrue('l-t-1-20_bi-net-amount-0' in response)
self.assertEqual(response['i-t-1-4_bi-discount-percentage-0'], 1000)
params = [{
'name': 'er_7142303001',
'pieces': 1,
'vat': 24.00,
}]
response = self._verifone_client.build_product_data(params)
self.assertEqual(response['i-t-1-4_bi-discount-percentage-0'], 0)
self.assertEqual(response['i-t-1-4_bi-vat-percentage-0'], 2400)
self.assertEqual(response['i-t-1-11_bi-unit-count-0'], 1)
def test_025_verify_response(self):
""" Test to verify response with extra data. """
response = {
'i-f-1-11_interface-version': '5',
'l-f-1-20_request-id': '2018102613094825567',
'l-f-1-20_response-id': '2018102613094825567',
'l-f-1-20_transaction-number': '2110929913',
's-f-1-10_software-version': '1.74.1.238',
's-f-1-30_operation': 'refund-payment',
's-f-1-30_payment-method-code': 'visa',
's-t-256-256_signature-one': '79C10BC83D94746C2A0859645EB476A73DBE2653C6B24C403CEB9017A759A330F7488AFF549E5AA861E8B6A8962B752B5066651F9C530277ABCAC04C25731EA17B220A638567403035B4A82D6C4CB96DE3F68DF0A089761030CF6766D7811B6895064C90DEC59A796BB3531D5F7C4C3E60B052D3642D35513D29EB89919F8434',
's-t-256-256_signature-two': 'ACB93737CB1DB0D0C7DDCA62DFC921095D2465A751F39F95A9E660B423A4DBF83C7C50914E803019B9884388D336340E18D028F4D58B4C0320EBBC069D0F1402B028ECCB04AD615340670C200062A4C7BDBD2293C44B091E6379B253866BA751BACA133BA58A89125E58DF92E7ABE0E548521565DE05DBAFE5A487F9C9E451B7',
't-f-14-19_response-timestamp': '2018-10-26 10:09:48',
's-t-1-40_shop-order__phase': 'Takaisin tilaukseen'
}
result = self._verifone_client.verify_response(response)
self.assertTrue(result)
def test_026_verify_incorrect_signature(self):
""" Test to verify incorrect signature """
result = self._verifone_client.verify_signature("signature", 'SHA123', "Test")
self.assertFalse(result)
def test_027_check_currency(self):
""" Test that currency is valid """
current_currency = self._verifone_client._currency
new_value = "123"
currency = self._verifone_client.check_currency(new_value)
self.assertEqual(current_currency, currency)
def test_028_process_payment(self):
""" Test to process payment when 's-t-1-30_bi-name-0' data is longer than Verifone accepts """
if (self._test_requests == "1"):
params = {
's-f-1-30_buyer-first-name': 'Test',
's-f-1-30_buyer-last-name': 'Tester',
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_buyer-phone-number': '123456789',
's-t-1-255_buyer-external-id': os.environ.get('EXTERNALID'),
}
response = self._verifone_client.list_saved_payment_methods(params)
saved_payment_id = response['l-t-1-20_payment-method-id-0']
self.assertIsNotNone(saved_payment_id)
params = {
'locale-f-2-5_payment-locale': 'fi_FI',
's-f-1-36_order-number': '1234',
'l-f-1-20_order-gross-amount': 2391,
's-f-1-30_buyer-first-name': "Test",
's-f-1-30_buyer-last-name': "Tester",
's-t-1-30_buyer-phone-number': 123456789,
's-f-1-100_buyer-email-address': os.environ.get('EMAIL'),
's-t-1-30_delivery-address-line-one': "Test Street 3",
's-t-1-30_delivery-address-city': "Tampere",
's-t-1-30_delivery-address-postal-code': "33210",
'i-t-1-3_delivery-address-country-code': 'FI',
's-t-1-30_bi-name-0': 'Test Product: test with long product name',
'l-t-1-20_bi-unit-gross-cost-0': 2391,
'i-t-1-11_bi-unit-count-0': 1,
'l-t-1-20_bi-gross-amount-0': 2391,
'l-t-1-20_bi-net-amount-0': 1928,
'i-t-1-4_bi-vat-percentage-0': 2400,
'i-t-1-4_bi-discount-percentage-0': 0,
's-t-1-255_buyer-external-id': os.environ.get('EXTERNALID'),
'l-t-1-20_saved-payment-method-id': saved_payment_id,
}
response = self._verifone_client.process_payment(params)
self.assertTrue('l-f-1-20_transaction-number' in response)
self.assertIsNotNone(response['l-f-1-20_transaction-number'])
def test_029_generate_payment_data(self):
""" Test to generate payment data with too long product names """
params = {
'order_number': '58459',
'locale': 'fi_FI',
'first_name': 'Test',
'last_name': 'Tester',
'email': '[email protected]',
'cancel_url': 'https://cancel.url',
'error_url': 'https://error.url',
'expired_url': 'https://expired.url',
'rejected_url': 'https://rejected.url',
'success_url': 'https://success.url',
'success_url_server': 'https://server.success.url',
'skip_confirmation': 1,
'country': '246',
'products': [
{
'name': 'Test Product: test with long product name',
'pieces': 1,
'vat': 24.00,
},
{
'name': 'Test Product 2: test with long product name',
'pieces': 1,
'vat': 24.00,
},
]
}
data = self._verifone_client.generate_payment_data(params)
self.assertEqual(data['s-t-1-30_bi-name-0'], 'Test Product: test with long p')
self.assertEqual(len(data['s-t-1-30_bi-name-0']), 30)
self.assertEqual(data['s-t-1-30_bi-name-1'], 'Test Product 2: test with long')
self.assertEqual(len(data['s-t-1-30_bi-name-1']), 30)
def test_030_generate_payment_data(self):
""" Test to generate payment data when customer id is defined """
params = {
'order_number': '58459',
'locale': 'fi_FI',
'first_name': 'Test',
'last_name': 'Tester',
'email': '[email protected]',
'cancel_url': 'https://cancel.url',
'error_url': 'https://error.url',
'expired_url': 'https://expired.url',
'rejected_url': 'https://rejected.url',
'success_url': 'https://success.url',
'success_url_server': 'https://server.success.url',
'skip_confirmation': 1,
'country': '246',
'customer_id': 'testi123456',
'products': [
{
'name': 'Test Product: test with long product name',
'pieces': 1,
'vat': 24.00,
},
]
}
data = self._verifone_client.generate_payment_data(params)
self.assertEqual(data['s-t-1-255_buyer-external-id'], 'testi123456')
def test_031_generate_payment_data(self):
""" Test to generate payment data when saved payment method id is defined """
params = {
'order_number': '58459',
'locale': 'fi_FI',
'first_name': 'Test',
'last_name': 'Tester',
'email': '[email protected]',
'cancel_url': 'https://cancel.url',
'error_url': 'https://error.url',
'expired_url': 'https://expired.url',
'rejected_url': 'https://rejected.url',
'success_url': 'https://success.url',
'success_url_server': 'https://server.success.url',
'skip_confirmation': 1,
'country': '246',
'customer_id': 'testi123456',
'saved_payment_method_id': 'test123456',
'products': [
{
'name': 'Test Product: test with long product name',
'pieces': 1,
'vat': 24.00,
},
]
}
data = self._verifone_client.generate_payment_data(params)
self.assertEqual(data['l-t-1-20_saved-payment-method-id'], 'test123456')
if __name__ == '__main__':
unittest.main(verbosity=2)
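# Run sketch (assumption: the suite's setUp reads the TESTSENDINGREQUEST
# environment variable into self._test_requests, gating the live requests):
#   TESTSENDINGREQUEST=1 python -m unittest -v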
| [] | [] | ["EXTERNALID", "EMAIL", "AGREEMENTCODE", "RSAVERIFONEPUBLICKEY", "RSAPRIVATEKEY", "TESTSENDINGREQUEST"] | [] | ["EXTERNALID", "EMAIL", "AGREEMENTCODE", "RSAVERIFONEPUBLICKEY", "RSAPRIVATEKEY", "TESTSENDINGREQUEST"] | python | 6 | 0 | |
python/put-pcep-client.py
|
"""
put-pcep-neighbor.
configures XR to act as PCC towards ODL
parameters:
* ODL IP address
* Peer XR NETCONF node
uses HTTP PUT with JSON payload
"""
import sys
import os
import requests
request_template = '''
{
"peer": [
{
"pce-peer-address": "%s",
"enable": [
null
]
}
]
}
'''
# check args length
if len(sys.argv) != 3:
    print("usage: %s ODL_IP_address Peer-NETCONF-Node" % sys.argv[0])
    sys.exit(1)
odl_user = os.environ.get('ODL_USER', 'admin')
odl_pass = os.environ.get('ODL_PASS', 'admin')
req_hdrs = { 'Content-Type' : 'application/json' }
req_body = request_template % (sys.argv[1])
url = 'http://' + sys.argv[1] + ':8181' + \
'/restconf/config/network-topology:network-topology/topology' + \
'/topology-netconf/node/' + sys.argv[2] + '/yang-ext:mount' + \
'/Cisco-IOS-XR-mpls-te-cfg:mpls-te/global-attributes' + \
'/pce-attributes/peers/peer/' + sys.argv[1]
resp = requests.put(url, data=req_body, headers=req_hdrs, auth=(odl_user, odl_pass))
print(resp)
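# Example invocation (sketch; the address and node name are hypothetical):
#   python put-pcep-client.py 192.0.2.10 xr-node-1
# Credentials default to admin/admin; override via ODL_USER and ODL_PASS.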
| [] | [] | ["ODL_PASS", "ODL_USER"] | [] | ["ODL_PASS", "ODL_USER"] | python | 2 | 0 | |
util/pkg/vfs/swiftfs.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vfs
import (
"bytes"
"crypto/tls"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/go-ini/ini"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
swiftcontainer "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
swiftobject "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
"github.com/gophercloud/gophercloud/pagination"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/homedir"
"k8s.io/klog"
"k8s.io/kops/util/pkg/hashing"
)
func NewSwiftClient() (*gophercloud.ServiceClient, error) {
config := OpenstackConfig{}
// Check if env credentials are valid first
authOption, err := config.GetCredential()
if err != nil {
return nil, err
}
pc, err := openstack.NewClient(authOption.IdentityEndpoint)
if err != nil {
return nil, fmt.Errorf("error building openstack provider client: %v", err)
}
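	// NOTE: TLS certificate verification is deliberately skipped below,
	// so any server certificate is accepted when talking to Keystone/Swift.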
tlsconfig := &tls.Config{}
tlsconfig.InsecureSkipVerify = true
transport := &http.Transport{TLSClientConfig: tlsconfig}
pc.HTTPClient = http.Client{
Transport: transport,
}
klog.V(2).Info("authenticating to keystone")
err = openstack.Authenticate(pc, authOption)
if err != nil {
return nil, fmt.Errorf("error building openstack authenticated client: %v", err)
}
var endpointOpt gophercloud.EndpointOpts
if region, err := config.GetRegion(); err != nil {
klog.Warningf("Retrieving swift configuration from openstack config file: %v", err)
endpointOpt, err = config.GetServiceConfig("Swift")
if err != nil {
return nil, err
}
} else {
endpointOpt = gophercloud.EndpointOpts{
Type: "object-store",
Region: region,
}
}
client, err := openstack.NewObjectStorageV1(pc, endpointOpt)
if err != nil {
return nil, fmt.Errorf("error building swift client: %v", err)
}
return client, nil
}
type OpenstackConfig struct {
}
func (_ OpenstackConfig) filename() (string, error) {
name := os.Getenv("OPENSTACK_CREDENTIAL_FILE")
if name != "" {
klog.V(2).Infof("using openstack config found in $OPENSTACK_CREDENTIAL_FILE: %s", name)
return name, nil
}
homeDir := homedir.HomeDir()
if homeDir == "" {
return "", fmt.Errorf("can not find home directory")
}
f := filepath.Join(homeDir, ".openstack", "config")
klog.V(2).Infof("using openstack config found in %s", f)
return f, nil
}
func (oc OpenstackConfig) getSection(name string, items []string) (map[string]string, error) {
filename, err := oc.filename()
if err != nil {
return nil, err
}
config, err := ini.Load(filename)
if err != nil {
return nil, fmt.Errorf("error loading config file: %v", err)
}
section, err := config.GetSection(name)
if err != nil {
return nil, fmt.Errorf("error getting section of %s: %v", name, err)
}
values := make(map[string]string)
for _, item := range items {
values[item] = section.Key(item).String()
}
return values, nil
}
func (oc OpenstackConfig) GetCredential() (gophercloud.AuthOptions, error) {
// prioritize environment config
env, enverr := openstack.AuthOptionsFromEnv()
if enverr != nil {
klog.Warningf("Could not initialize swift from environment: %v", enverr)
// fallback to config file
return oc.getCredentialFromFile()
}
if env.ApplicationCredentialID != "" && env.Username == "" {
env.Scope = &gophercloud.AuthScope{}
}
return env, nil
}
func (oc OpenstackConfig) GetRegion() (string, error) {
var region string
if region = os.Getenv("OS_REGION_NAME"); region != "" {
if len(region) > 1 {
if region[0] == '\'' && region[len(region)-1] == '\'' {
region = region[1 : len(region)-1]
}
}
return region, nil
}
items := []string{"region"}
// TODO: Unsure if this is the correct section for region
values, err := oc.getSection("Global", items)
if err != nil {
return "", fmt.Errorf("Region not provided in OS_REGION_NAME or openstack config section GLOBAL")
}
return values["region"], nil
}
func (oc OpenstackConfig) getCredentialFromFile() (gophercloud.AuthOptions, error) {
opt := gophercloud.AuthOptions{}
name := "Default"
items := []string{"identity", "user", "user_id", "password", "domain_id", "domain_name", "tenant_id", "tenant_name"}
values, err := oc.getSection(name, items)
if err != nil {
return opt, err
}
for _, c1 := range []string{"identity", "password"} {
if values[c1] == "" {
return opt, fmt.Errorf("missing %s in section of %s", c1, name)
}
}
checkItems := [][]string{{"user", "user_id"}, {"domain_name", "domain_id"}, {"tenant_name", "tenant_id"}}
for _, c2 := range checkItems {
if values[c2[0]] == "" && values[c2[1]] == "" {
return opt, fmt.Errorf("missing %s and %s in section of %s", c2[0], c2[1], name)
}
}
opt.IdentityEndpoint = values["identity"]
opt.UserID = values["user_id"]
opt.Username = values["user"]
opt.Password = values["password"]
opt.TenantID = values["tenant_id"]
opt.TenantName = values["tenant_name"]
opt.DomainID = values["domain_id"]
opt.DomainName = values["domain_name"]
opt.AllowReauth = true
return opt, nil
}
func (oc OpenstackConfig) GetServiceConfig(name string) (gophercloud.EndpointOpts, error) {
opt := gophercloud.EndpointOpts{}
items := []string{"service_type", "service_name", "region", "availability"}
values, err := oc.getSection(name, items)
if err != nil {
return opt, err
}
if values["region"] == "" {
return opt, fmt.Errorf("missing region in section of %s", name)
}
opt.Type = values["service_type"]
opt.Name = values["service_name"]
opt.Region = values["region"]
opt.Availability = gophercloud.Availability(values["availability"])
return opt, nil
}
// SwiftPath is a vfs path for Openstack Cloud Storage.
type SwiftPath struct {
client *gophercloud.ServiceClient
bucket string
key string
hash string
}
var _ Path = &SwiftPath{}
var _ HasHash = &SwiftPath{}
// swiftReadBackoff is the backoff strategy for Swift read retries.
var swiftReadBackoff = wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Jitter: 0.1,
Steps: 4,
}
// swiftWriteBackoff is the backoff strategy for Swift write retries.
var swiftWriteBackoff = wait.Backoff{
Duration: time.Second,
Factor: 1.5,
Jitter: 0.1,
Steps: 5,
}
func NewSwiftPath(client *gophercloud.ServiceClient, bucket string, key string) (*SwiftPath, error) {
bucket = strings.TrimSuffix(bucket, "/")
key = strings.TrimPrefix(key, "/")
return &SwiftPath{
client: client,
bucket: bucket,
key: key,
}, nil
}
func (p *SwiftPath) Path() string {
return "swift://" + p.bucket + "/" + p.key
}
func (p *SwiftPath) Bucket() string {
return p.bucket
}
func (p *SwiftPath) String() string {
return p.Path()
}
func (p *SwiftPath) Remove() error {
done, err := RetryWithBackoff(swiftWriteBackoff, func() (bool, error) {
opt := swiftobject.DeleteOpts{}
_, err := swiftobject.Delete(p.client, p.bucket, p.key, opt).Extract()
if err != nil {
if isSwiftNotFound(err) {
return true, os.ErrNotExist
}
return false, fmt.Errorf("error deleting %s: %v", p, err)
}
return true, nil
})
if err != nil {
return err
} else if done {
return nil
} else {
return wait.ErrWaitTimeout
}
}
func (p *SwiftPath) RemoveAllVersions() error {
return p.Remove()
}
func (p *SwiftPath) Join(relativePath ...string) Path {
args := []string{p.key}
args = append(args, relativePath...)
joined := path.Join(args...)
return &SwiftPath{
client: p.client,
bucket: p.bucket,
key: joined,
}
}
func (p *SwiftPath) WriteFile(data io.ReadSeeker, acl ACL) error {
done, err := RetryWithBackoff(swiftWriteBackoff, func() (bool, error) {
klog.V(4).Infof("Writing file %q", p)
if _, err := data.Seek(0, 0); err != nil {
return false, fmt.Errorf("error seeking to start of data stream for %s: %v", p, err)
}
createOpts := swiftobject.CreateOpts{Content: data}
_, err := swiftobject.Create(p.client, p.bucket, p.key, createOpts).Extract()
if err != nil {
return false, fmt.Errorf("error writing %s: %v", p, err)
}
return true, nil
})
if err != nil {
return err
} else if done {
return nil
} else {
// Shouldn't happen - we always return a non-nil error with false.
return wait.ErrWaitTimeout
}
}
// To prevent concurrent creates on the same file while maintaining atomicity of writes,
// we take a process-wide lock during the operation.
// Not a great approach, but fine for a single process (with low concurrency).
// TODO: should we enable versioning?
var createFileLockSwift sync.Mutex
func (p *SwiftPath) CreateFile(data io.ReadSeeker, acl ACL) error {
createFileLockSwift.Lock()
defer createFileLockSwift.Unlock()
// Check if exists.
_, err := RetryWithBackoff(swiftReadBackoff, func() (bool, error) {
klog.V(4).Infof("Getting file %q", p)
_, err := swiftobject.Get(p.client, p.bucket, p.key, swiftobject.GetOpts{}).Extract()
if err == nil {
return true, nil
} else if isSwiftNotFound(err) {
return true, os.ErrNotExist
} else {
return false, fmt.Errorf("error getting %s: %v", p, err)
}
})
if err == nil {
return os.ErrExist
} else if !os.IsNotExist(err) {
return err
}
err = p.createBucket()
if err != nil {
return err
}
return p.WriteFile(data, acl)
}
func (p *SwiftPath) createBucket() error {
done, err := RetryWithBackoff(swiftWriteBackoff, func() (bool, error) {
_, err := swiftcontainer.Get(p.client, p.bucket, swiftcontainer.GetOpts{}).Extract()
if err == nil {
return true, nil
}
if isSwiftNotFound(err) {
createOpts := swiftcontainer.CreateOpts{}
_, err = swiftcontainer.Create(p.client, p.bucket, createOpts).Extract()
return err == nil, err
}
return false, err
})
if err != nil {
return err
} else if done {
return nil
} else {
// Shouldn't happen - we always return a non-nil error with false.
return wait.ErrWaitTimeout
}
}
// ReadFile implements Path::ReadFile
func (p *SwiftPath) ReadFile() ([]byte, error) {
var b bytes.Buffer
done, err := RetryWithBackoff(swiftReadBackoff, func() (bool, error) {
b.Reset()
_, err := p.WriteTo(&b)
if err != nil {
if os.IsNotExist(err) {
// Not recoverable
return true, err
}
return false, err
}
// Success!
return true, nil
})
if err != nil {
return nil, err
} else if done {
return b.Bytes(), nil
} else {
// Shouldn't happen - we always return a non-nil error with false
return nil, wait.ErrWaitTimeout
}
}
// WriteTo implements io.WriterTo
func (p *SwiftPath) WriteTo(out io.Writer) (int64, error) {
klog.V(4).Infof("Reading file %q", p)
opt := swiftobject.DownloadOpts{}
result := swiftobject.Download(p.client, p.bucket, p.key, opt)
if result.Err != nil {
if isSwiftNotFound(result.Err) {
return 0, os.ErrNotExist
}
return 0, fmt.Errorf("error reading %s: %v", p, result.Err)
}
defer result.Body.Close()
return io.Copy(out, result.Body)
}
func (p *SwiftPath) readPath(opt swiftobject.ListOpts) ([]Path, error) {
var ret []Path
done, err := RetryWithBackoff(swiftReadBackoff, func() (bool, error) {
var paths []Path
pager := swiftobject.List(p.client, p.bucket, opt)
err := pager.EachPage(func(page pagination.Page) (bool, error) {
objects, err1 := swiftobject.ExtractInfo(page)
if err1 != nil {
return false, err1
}
for _, o := range objects {
child := &SwiftPath{
client: p.client,
bucket: p.bucket,
key: o.Name,
hash: o.Hash,
}
paths = append(paths, child)
}
return true, nil
})
if err != nil {
if isSwiftNotFound(err) {
return true, os.ErrNotExist
}
return false, fmt.Errorf("error listing %s: %v", p, err)
}
klog.V(8).Infof("Listed files in %v: %v", p, paths)
ret = paths
return true, nil
})
if err != nil {
return nil, err
} else if done {
return ret, nil
} else {
return nil, wait.ErrWaitTimeout
}
}
// ReadDir implements Path::ReadDir.
func (p *SwiftPath) ReadDir() ([]Path, error) {
prefix := p.key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
opt := swiftobject.ListOpts{
Full: true,
Path: prefix,
}
return p.readPath(opt)
}
// ReadTree implements Path::ReadTree.
func (p *SwiftPath) ReadTree() ([]Path, error) {
prefix := p.key
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
opt := swiftobject.ListOpts{
Full: true,
Prefix: prefix,
}
return p.readPath(opt)
}
func (p *SwiftPath) Base() string {
return path.Base(p.key)
}
func (p *SwiftPath) PreferredHash() (*hashing.Hash, error) {
return p.Hash(hashing.HashAlgorithmMD5)
}
func (p *SwiftPath) Hash(a hashing.HashAlgorithm) (*hashing.Hash, error) {
if a != hashing.HashAlgorithmMD5 {
return nil, nil
}
md5 := p.hash
if md5 == "" {
return nil, nil
}
md5Bytes, err := hex.DecodeString(md5)
if err != nil {
return nil, fmt.Errorf("Etag was not a valid MD5 sum: %q", md5)
}
return &hashing.Hash{Algorithm: hashing.HashAlgorithmMD5, HashValue: md5Bytes}, nil
}
func isSwiftNotFound(err error) bool {
if err == nil {
return false
}
_, ok := err.(gophercloud.ErrDefault404)
return ok
}
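// Example usage (a sketch; error handling elided and the bucket/key
// names below are hypothetical):
//
//	client, _ := NewSwiftClient()
//	p, _ := NewSwiftPath(client, "kops-state", "cluster/config")
//	data, _ := p.ReadFile()
//	_ = data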
| ["\"OPENSTACK_CREDENTIAL_FILE\"", "\"OS_REGION_NAME\""] | [] | ["OS_REGION_NAME", "OPENSTACK_CREDENTIAL_FILE"] | [] | ["OS_REGION_NAME", "OPENSTACK_CREDENTIAL_FILE"] | go | 2 | 0 | |
clifford/__init__.py
|
"""
.. currentmodule:: clifford
========================================
clifford (:mod:`clifford`)
========================================
The top-level module.
Provides two core classes, :class:`Layout` and :class:`MultiVector`, along with several helper functions to implement the algebras.
Constructing algebras
=====================
Note that typically the :doc:`/predefined-algebras` are sufficient, and there is no need to build an algebra from scratch.
.. autosummary::
:toctree:
Cl
conformalize
Whether you construct your algebras from scratch, or use the predefined ones, you'll end up working with the following types:
.. autosummary::
:toctree:
MultiVector
Layout
ConformalLayout
Advanced algebra configuration
------------------------------
It is unlikely you will need these features, but they remain as a better
spelling for features which have always been in ``clifford``.
.. autosummary::
:toctree: generated/
BasisBladeOrder
BasisVectorIds
Global configuration functions
==============================
These functions are used to change the global behavior of ``clifford``.
.. autofunction:: eps
.. autofunction:: pretty
.. autofunction:: ugly
.. autofunction:: print_precision
Miscellaneous classes
=======================
.. autosummary::
:toctree:
MVArray
Frame
BladeMap
Miscellaneous functions
=======================
.. autosummary::
:toctree:
grade_obj
randomMV
"""
# Standard library imports.
import os
import itertools
import warnings
from typing import List, Tuple, Set, Dict
# Major library imports.
import numpy as np
import numba as _numba # to avoid clashing with clifford.numba
import sparse
try:
from numba.np import numpy_support as _numpy_support
except ImportError:
import numba.numpy_support as _numpy_support
from clifford.io import write_ga_file, read_ga_file # noqa: F401
from ._version import __version__ # noqa: F401
from . import _numba_utils
from ._settings import pretty, ugly, eps, print_precision # noqa: F401
import clifford.taylor_expansions as taylor_expansions
# For backwards-compatibility. New code should import directly from `clifford.operator`
from .operator import gp, op, ip # noqa: F401
try:
NUMBA_DISABLE_PARALLEL = os.environ['NUMBA_DISABLE_PARALLEL']
except KeyError:
NUMBA_PARALLEL = True
else:
NUMBA_PARALLEL = not bool(NUMBA_DISABLE_PARALLEL)
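# Note: only truthiness of the raw string is checked above, so any
# non-empty value of NUMBA_DISABLE_PARALLEL (including "0") disables
# parallelism.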
def general_exp(x, **kwargs):
warnings.warn("cf.general_exp is deprecated. Use `mv.exp()` or `np.exp(mv)` on multivectors, or `cf.taylor_expansions.exp(x)` on arbitrary objects", DeprecationWarning, stacklevel=2)
return taylor_expansions.exp(x, **kwargs)
def linear_operator_as_matrix(func, input_blades, output_blades):
"""
Return a matrix that performs the operation of the provided linear
operator function func mapping the input blades to the output blades
"""
ndimin = len(input_blades)
ndimout = len(output_blades)
mat = np.zeros((ndimout, ndimin))
for i, b in enumerate(input_blades):
b_result = func(b)
mat[:, i] = np.array([b_result[j] for j in output_blades])
return mat
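# Example (a sketch, assuming `layout, blades = Cl(2)`): right-multiplication
# by e12 maps e1 -> e2 and e2 -> -e1, so on the basis [e1, e2] it is the
# rotation matrix [[0, -1], [1, 0]]:
#   e1, e2, e12 = blades['e1'], blades['e2'], blades['e12']
#   mat = linear_operator_as_matrix(lambda b: b * e12, [e1, e2], [e1, e2])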
def get_mult_function(mt: sparse.COO, gradeList,
grades_a=None, grades_b=None, filter_mask=None):
'''
Returns a function that implements the mult_table on two input multivectors
'''
if (filter_mask is None) and (grades_a is not None) and (grades_b is not None):
# If not specified explicitly, we can specify sparseness by grade
filter_mask = np.zeros(mt.nnz, dtype=bool)
k_list, _, m_list = mt.coords
for i in range(len(filter_mask)):
if gradeList[k_list[i]] in grades_a:
if gradeList[m_list[i]] in grades_b:
filter_mask[i] = 1
filter_mask = sparse.COO(coords=mt.coords, data=filter_mask, shape=mt.shape)
if filter_mask is not None:
# We can pass the sparse filter mask directly
mt = sparse.where(filter_mask, mt, mt.dtype.type(0))
return _get_mult_function(mt)
else:
return _get_mult_function_runtime_sparse(mt)
def _get_mult_function_result_type(a: _numba.types.Type, b: _numba.types.Type, mt: np.dtype):
a_dt = _numpy_support.as_dtype(getattr(a, 'dtype', a))
b_dt = _numpy_support.as_dtype(getattr(b, 'dtype', b))
return np.result_type(a_dt, mt, b_dt)
def _get_mult_function(mt: sparse.COO):
"""
Get a function similar to ``lambda a, b: np.einsum('i,ijk,k->j', a, mt, b)``
Returns
-------
func : function (array_like (n_dims,), array_like (n_dims,)) -> array_like (n_dims,)
A function that computes the appropriate multiplication
"""
# unpack for numba
dims = mt.shape[1]
k_list, l_list, m_list = mt.coords
mult_table_vals = mt.data
@_numba_utils.generated_jit(nopython=True)
def mv_mult(value, other_value):
# this casting will be done at jit-time
ret_dtype = _get_mult_function_result_type(value, other_value, mult_table_vals.dtype)
mult_table_vals_t = mult_table_vals.astype(ret_dtype)
def mult_inner(value, other_value):
output = np.zeros(dims, dtype=ret_dtype)
for k, l, m, val in zip(k_list, l_list, m_list, mult_table_vals_t):
output[l] += value[k] * val * other_value[m]
return output
return mult_inner
return mv_mult
def _get_mult_function_runtime_sparse(mt: sparse.COO):
"""
A variant of `_get_mult_function` that attempts to exploit runtime zeros
The returned function avoids performing multiplications if vectors contain
zeros.
TODO: determine if this actually helps.
"""
# unpack for numba
dims = mt.shape[1]
k_list, l_list, m_list = mt.coords
mult_table_vals = mt.data
@_numba_utils.generated_jit(nopython=True)
def mv_mult(value, other_value):
# this casting will be done at jit-time
ret_dtype = _get_mult_function_result_type(value, other_value, mult_table_vals.dtype)
mult_table_vals_t = mult_table_vals.astype(ret_dtype)
def mult_inner(value, other_value):
output = np.zeros(dims, dtype=ret_dtype)
for ind, k in enumerate(k_list):
v_val = value[k]
if v_val != 0.0:
m = m_list[ind]
ov_val = other_value[m]
if ov_val != 0.0:
l = l_list[ind]
output[l] += v_val * mult_table_vals_t[ind] * ov_val
return output
return mult_inner
return mv_mult
@_numba_utils.njit
def grade_obj_func(objin_val, gradeList, threshold):
""" returns the modal grade of a multivector """
modal_value_count = np.zeros(objin_val.shape)
n = 0
for g in gradeList:
if np.abs(objin_val[n]) > threshold:
modal_value_count[g] += 1
n += 1
return np.argmax(modal_value_count)
def grade_obj(objin, threshold=0.0000001):
'''
Returns the modal grade of a multivector
'''
return grade_obj_func(objin.value, objin.layout._basis_blade_order.grades, threshold)
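# e.g. for a pure blade such as blades['e12'], grade_obj returns 2, since
# all coefficients above the threshold sit in grade 2.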
def grades_present(objin: 'MultiVector', threshold=0.0000001) -> Set[int]:
# for backwards compatibility
warnings.warn(
"`clifford.grades_present(x)` is deprecated, use `x.grades()` instead. "
"Note that the method uses `clifford.eps()` as the default tolerance.",
DeprecationWarning, stacklevel=2)
return objin.grades(eps=threshold)
# todo: work out how to let numba use the COO objects directly
@_numba_utils.njit
def _numba_val_get_left_gmt_matrix(x, k_list, l_list, m_list, mult_table_vals, ndims):
# TODO: consider `dtype=result_type(x.dtype, mult_table_vals.dtype)`
intermed = np.zeros((ndims, ndims), dtype=x.dtype)
test_ind = 0
for k in k_list:
j = l_list[test_ind]
i = m_list[test_ind]
intermed[j, i] += mult_table_vals[test_ind] * x[k]
test_ind = test_ind + 1
return intermed
def val_get_left_gmt_matrix(mt: sparse.COO, x):
"""
This produces the matrix X that performs left multiplication with x
e.g. X@b == (x*b).value
"""
dims = mt.shape[1]
k_list, l_list, m_list = mt.coords
return _numba_val_get_left_gmt_matrix(
x, k_list, l_list, m_list, mt.data, dims
)
def val_get_right_gmt_matrix(mt: sparse.COO, x):
"""
This produces the matrix X that performs right multiplication with x
e.g. X@b == (b*x).value
"""
return val_get_left_gmt_matrix(mt.T, x)
# TODO: Move this to the top once we remove circular imports
from ._layout import Layout # noqa: E402
from ._multivector import MultiVector # noqa: E402
from ._conformal_layout import ConformalLayout # noqa: E402
from ._layout_helpers import BasisVectorIds, BasisBladeOrder # noqa: F401
from ._mvarray import MVArray, array # noqa: F401
from ._frame import Frame # noqa: F401
# this registers the extension type
from . import numba # noqa: F401
from ._blademap import BladeMap # noqa: F401
# copied from the itertools docs
def _powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
def elements(dims: int, firstIdx=0) -> List[Tuple[int, ...]]:
"""Return a list of tuples representing all 2**dims of blades
in a dims-dimensional GA.
Elements are sorted lexicographically.
"""
return list(_powerset(range(firstIdx, firstIdx + dims)))
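# e.g. elements(2) == [(), (0,), (1,), (0, 1)]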
def Cl(p: int = 0, q: int = 0, r: int = 0, sig=None, names=None, firstIdx=1,
mvClass=MultiVector) -> Tuple[Layout, Dict[str, MultiVector]]:
r"""Returns a :class:`Layout` and basis blade :class:`MultiVector`\ s for the geometric algebra :math:`Cl_{p,q,r}`.
The notation :math:`Cl_{p,q,r}` means that the algebra is :math:`p+q+r`-dimensional, with the first :math:`p` vectors with positive signature, the next :math:`q` vectors negative, and the final :math:`r` vectors with null signature.
Parameters
----------
p : int
number of positive-signature basis vectors
q : int
number of negative-signature basis vectors
r : int
number of zero-signature basis vectors
sig
See the docs for :class:`clifford.Layout`. If ``sig`` is passed, then
`p`, `q`, and `r` are ignored.
names, firstIdx
See the docs for :class:`clifford.Layout`.
Returns
=======
layout : Layout
The resulting layout
blades : Dict[str, MultiVector]
The blades of the returned layout, equivalent to ``layout.blades``.
"""
if sig is None:
layout = Layout._from_Cl(p, q, r, firstIdx=firstIdx, names=names)
else:
layout = Layout._from_sig(sig, firstIdx=firstIdx, names=names)
return layout, layout.bases(mvClass=mvClass)
def bases(layout, *args, **kwargs):
return layout.bases(*args, **kwargs)
def basis_vectors(layout):
return layout.basis_vectors
def randomMV(
layout: Layout, min=-2.0, max=2.0, grades=None, mvClass=MultiVector,
uniform=None, n=1, normed: bool = False, rng=None):
"""n Random MultiVectors with given layout.
Coefficients are between min and max, and if grades is a list of integers,
only those grades will be non-zero.
Parameters
------------
layout : Layout
the layout
min, max : Number
range of values from which to uniformly sample coefficients
grades : int, List[int]
grades which should have non-zero coefficients. If ``None``, defaults to
all grades. A single integer is treated as a list of one integer.
uniform : Callable[[Number, Number, Tuple[int, ...]], np.ndarray]
A function like `np.random.uniform`. Defaults to ``rng.uniform``.
n : int
The number of samples to generate. If ``n > 1``, this function
returns a list instead of a single multivector
normed : bool
If true, call :meth:`MultiVector.normal` on each multivector. Note
that this does not result in a uniform sampling of directions.
rng :
The random number state to use. Typical use would be to construct a
generator with :func:`numpy.random.default_rng`.
Examples
--------
>>> randomMV(layout, min=-2.0, max=2.0, grades=None, uniform=None, n=2) # doctest: +SKIP
"""
if n > 1:
    # return many multivectors; normalize rng once so a provided seed or
    # generator is used (and not re-seeded) for every sample
    rng = np.random.default_rng(rng)
    return [randomMV(layout=layout, min=min, max=max, grades=grades,
                     mvClass=mvClass, uniform=uniform, n=1,
                     normed=normed, rng=rng) for k in range(n)]
if uniform is None:
rng = np.random.default_rng(rng)
uniform = rng.uniform
if grades is None:
mv = mvClass(layout, uniform(min, max, (layout.gaDims,)))
else:
if isinstance(grades, int):
grades = [grades]
newValue = np.zeros((layout.gaDims,))
for i in range(layout.gaDims):
if layout._basis_blade_order.grades[i] in grades:
newValue[i] = uniform(min, max)
mv = mvClass(layout, newValue)
if normed:
mv = mv.normal()
return mv
def conformalize(layout: Layout, added_sig=[1, -1], *, mvClass=MultiVector, **kwargs):
'''
Conformalize a Geometric Algebra
Given the `Layout` for a GA of signature (p, q), this
will produce a GA of signature (p+1, q+1), as well as
return a new list of blades and some `stuff`. `stuff`
is a dict containing the null basis blades, and some
up/down functions for projecting in/out of the CGA.
Parameters
-------------
layout: `clifford.Layout`
layout of the GA to conformalize (the base)
added_sig: list-like
list of +1, -1 denoting the added signatures
**kwargs :
extra arguments to pass on into the :class:`Layout` constructor.
Returns
---------
layout_c : :class:`ConformalLayout`
layout of the conformalized GA
blades_c : dict
blades for the CGA
stuff: dict
dict mapping the following members of :class:`ConformalLayout` by their
names, for easy unpacking into the global namespace:
.. autosummary::
~ConformalLayout.ep
~ConformalLayout.en
~ConformalLayout.eo
~ConformalLayout.einf
~ConformalLayout.E0
~ConformalLayout.I_base
~ConformalLayout.up
~ConformalLayout.down
~ConformalLayout.homo
Examples
---------
>>> from clifford import Cl, conformalize
>>> G2, blades = Cl(2)
>>> G2c, bladesc, stuff = conformalize(G2)
>>> locals().update(bladesc)
>>> locals().update(stuff)
'''
layout_c = ConformalLayout._from_base_layout(layout, added_sig, **kwargs)
stuff = {
attr: getattr(layout_c, attr)
for attr in [
"ep", "en", "eo", "einf", "E0",
"up", "down", "homo", "I_base",
]
}
return layout_c, layout_c.bases(mvClass=mvClass), stuff
# TODO: fix caching to work
# generate pre-defined algebras and cache them
# sigs = [(1, 1, 0), (2, 0, 0), (3, 1, 0), (3, 0, 0), (3, 2, 0), (4, 0, 0)]
# current_module = sys.modules[__name__]
# caching.build_or_read_cache_and_attach_submods(current_module, sigs=sigs)
| [] | [] | ["NUMBA_DISABLE_PARALLEL"] | [] | ["NUMBA_DISABLE_PARALLEL"] | python | 1 | 0 | |
examples/configmap/listAll/listAllConfigmaps.go
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"os"
"github.com/thekubeworld/k8devel/pkg/client"
"github.com/thekubeworld/k8devel/pkg/configmap"
"github.com/thekubeworld/k8devel/pkg/emoji"
)
func main() {
e := emoji.LoadEmojis()
c := client.Client{}
c.NumberMaxOfAttemptsPerTask = 10
c.TimeoutTaskInSec = 2
// Connect to cluster from:
// - $HOME/kubeconfig (Linux)
// - os.Getenv("USERPROFILE") (Windows)
c.Connect()
configMapList, err := configmap.ListAll(&c)
if err != nil {
fmt.Printf("%s\n", err)
os.Exit(1)
}
for _, cm := range configMapList.Items {
fmt.Printf("Name %s\n", cm.ObjectMeta.Name)
fmt.Printf("Namespace: %s\n", cm.ObjectMeta.Namespace)
for _, d := range cm.Data {
fmt.Printf("%s", d)
}
fmt.Printf("\n")
}
fmt.Printf("Number total if configmaps: %v %s %s\n",
len(configMapList.Items),
emoji.Show(e.Rocket),
emoji.Show(e.Collision))
}
| ["\"USERPROFILE\""] | [] | ["USERPROFILE"] | [] | ["USERPROFILE"] | go | 1 | 0 | |
services/api/store/mocks/mock.go
|
// Code generated by MockGen. DO NOT EDIT.
// Source: store.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
database "github.com/consensys/orchestrate/pkg/toolkit/database"
entities "github.com/consensys/orchestrate/pkg/types/entities"
store "github.com/consensys/orchestrate/services/api/store"
models "github.com/consensys/orchestrate/services/api/store/models"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockStore is a mock of Store interface
type MockStore struct {
ctrl *gomock.Controller
recorder *MockStoreMockRecorder
}
// MockStoreMockRecorder is the mock recorder for MockStore
type MockStoreMockRecorder struct {
mock *MockStore
}
// NewMockStore creates a new mock instance
func NewMockStore(ctrl *gomock.Controller) *MockStore {
mock := &MockStore{ctrl: ctrl}
mock.recorder = &MockStoreMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockStore) EXPECT() *MockStoreMockRecorder {
return m.recorder
}
// Connect mocks base method
func (m *MockStore) Connect(ctx context.Context, conf interface{}) (store.DB, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Connect", ctx, conf)
ret0, _ := ret[0].(store.DB)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Connect indicates an expected call of Connect
func (mr *MockStoreMockRecorder) Connect(ctx, conf interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockStore)(nil).Connect), ctx, conf)
}
// MockAgents is a mock of Agents interface
type MockAgents struct {
ctrl *gomock.Controller
recorder *MockAgentsMockRecorder
}
// MockAgentsMockRecorder is the mock recorder for MockAgents
type MockAgentsMockRecorder struct {
mock *MockAgents
}
// NewMockAgents creates a new mock instance
func NewMockAgents(ctrl *gomock.Controller) *MockAgents {
mock := &MockAgents{ctrl: ctrl}
mock.recorder = &MockAgentsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockAgents) EXPECT() *MockAgentsMockRecorder {
return m.recorder
}
// Schedule mocks base method
func (m *MockAgents) Schedule() store.ScheduleAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Schedule")
ret0, _ := ret[0].(store.ScheduleAgent)
return ret0
}
// Schedule indicates an expected call of Schedule
func (mr *MockAgentsMockRecorder) Schedule() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Schedule", reflect.TypeOf((*MockAgents)(nil).Schedule))
}
// Job mocks base method
func (m *MockAgents) Job() store.JobAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Job")
ret0, _ := ret[0].(store.JobAgent)
return ret0
}
// Job indicates an expected call of Job
func (mr *MockAgentsMockRecorder) Job() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Job", reflect.TypeOf((*MockAgents)(nil).Job))
}
// Log mocks base method
func (m *MockAgents) Log() store.LogAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Log")
ret0, _ := ret[0].(store.LogAgent)
return ret0
}
// Log indicates an expected call of Log
func (mr *MockAgentsMockRecorder) Log() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockAgents)(nil).Log))
}
// Transaction mocks base method
func (m *MockAgents) Transaction() store.TransactionAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Transaction")
ret0, _ := ret[0].(store.TransactionAgent)
return ret0
}
// Transaction indicates an expected call of Transaction
func (mr *MockAgentsMockRecorder) Transaction() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Transaction", reflect.TypeOf((*MockAgents)(nil).Transaction))
}
// TransactionRequest mocks base method
func (m *MockAgents) TransactionRequest() store.TransactionRequestAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TransactionRequest")
ret0, _ := ret[0].(store.TransactionRequestAgent)
return ret0
}
// TransactionRequest indicates an expected call of TransactionRequest
func (mr *MockAgentsMockRecorder) TransactionRequest() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionRequest", reflect.TypeOf((*MockAgents)(nil).TransactionRequest))
}
// Account mocks base method
func (m *MockAgents) Account() store.AccountAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Account")
ret0, _ := ret[0].(store.AccountAgent)
return ret0
}
// Account indicates an expected call of Account
func (mr *MockAgentsMockRecorder) Account() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Account", reflect.TypeOf((*MockAgents)(nil).Account))
}
// Faucet mocks base method
func (m *MockAgents) Faucet() store.FaucetAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Faucet")
ret0, _ := ret[0].(store.FaucetAgent)
return ret0
}
// Faucet indicates an expected call of Faucet
func (mr *MockAgentsMockRecorder) Faucet() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Faucet", reflect.TypeOf((*MockAgents)(nil).Faucet))
}
// Artifact mocks base method
func (m *MockAgents) Artifact() store.ArtifactAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Artifact")
ret0, _ := ret[0].(store.ArtifactAgent)
return ret0
}
// Artifact indicates an expected call of Artifact
func (mr *MockAgentsMockRecorder) Artifact() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Artifact", reflect.TypeOf((*MockAgents)(nil).Artifact))
}
// CodeHash mocks base method
func (m *MockAgents) CodeHash() store.CodeHashAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CodeHash")
ret0, _ := ret[0].(store.CodeHashAgent)
return ret0
}
// CodeHash indicates an expected call of CodeHash
func (mr *MockAgentsMockRecorder) CodeHash() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CodeHash", reflect.TypeOf((*MockAgents)(nil).CodeHash))
}
// Event mocks base method
func (m *MockAgents) Event() store.EventAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Event")
ret0, _ := ret[0].(store.EventAgent)
return ret0
}
// Event indicates an expected call of Event
func (mr *MockAgentsMockRecorder) Event() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockAgents)(nil).Event))
}
// Method mocks base method
func (m *MockAgents) Method() store.MethodAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Method")
ret0, _ := ret[0].(store.MethodAgent)
return ret0
}
// Method indicates an expected call of Method
func (mr *MockAgentsMockRecorder) Method() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Method", reflect.TypeOf((*MockAgents)(nil).Method))
}
// Repository mocks base method
func (m *MockAgents) Repository() store.RepositoryAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Repository")
ret0, _ := ret[0].(store.RepositoryAgent)
return ret0
}
// Repository indicates an expected call of Repository
func (mr *MockAgentsMockRecorder) Repository() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repository", reflect.TypeOf((*MockAgents)(nil).Repository))
}
// Tag mocks base method
func (m *MockAgents) Tag() store.TagAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tag")
ret0, _ := ret[0].(store.TagAgent)
return ret0
}
// Tag indicates an expected call of Tag
func (mr *MockAgentsMockRecorder) Tag() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", reflect.TypeOf((*MockAgents)(nil).Tag))
}
// Chain mocks base method
func (m *MockAgents) Chain() store.ChainAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Chain")
ret0, _ := ret[0].(store.ChainAgent)
return ret0
}
// Chain indicates an expected call of Chain
func (mr *MockAgentsMockRecorder) Chain() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chain", reflect.TypeOf((*MockAgents)(nil).Chain))
}
// PrivateTxManager mocks base method
func (m *MockAgents) PrivateTxManager() store.PrivateTxManagerAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrivateTxManager")
ret0, _ := ret[0].(store.PrivateTxManagerAgent)
return ret0
}
// PrivateTxManager indicates an expected call of PrivateTxManager
func (mr *MockAgentsMockRecorder) PrivateTxManager() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrivateTxManager", reflect.TypeOf((*MockAgents)(nil).PrivateTxManager))
}
// MockDB is a mock of DB interface
type MockDB struct {
ctrl *gomock.Controller
recorder *MockDBMockRecorder
}
// MockDBMockRecorder is the mock recorder for MockDB
type MockDBMockRecorder struct {
mock *MockDB
}
// NewMockDB creates a new mock instance
func NewMockDB(ctrl *gomock.Controller) *MockDB {
mock := &MockDB{ctrl: ctrl}
mock.recorder = &MockDBMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDB) EXPECT() *MockDBMockRecorder {
return m.recorder
}
// Begin mocks base method
func (m *MockDB) Begin() (database.Tx, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Begin")
ret0, _ := ret[0].(database.Tx)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Begin indicates an expected call of Begin
func (mr *MockDBMockRecorder) Begin() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Begin", reflect.TypeOf((*MockDB)(nil).Begin))
}
// Schedule mocks base method
func (m *MockDB) Schedule() store.ScheduleAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Schedule")
ret0, _ := ret[0].(store.ScheduleAgent)
return ret0
}
// Schedule indicates an expected call of Schedule
func (mr *MockDBMockRecorder) Schedule() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Schedule", reflect.TypeOf((*MockDB)(nil).Schedule))
}
// Job mocks base method
func (m *MockDB) Job() store.JobAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Job")
ret0, _ := ret[0].(store.JobAgent)
return ret0
}
// Job indicates an expected call of Job
func (mr *MockDBMockRecorder) Job() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Job", reflect.TypeOf((*MockDB)(nil).Job))
}
// Log mocks base method
func (m *MockDB) Log() store.LogAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Log")
ret0, _ := ret[0].(store.LogAgent)
return ret0
}
// Log indicates an expected call of Log
func (mr *MockDBMockRecorder) Log() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockDB)(nil).Log))
}
// Transaction mocks base method
func (m *MockDB) Transaction() store.TransactionAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Transaction")
ret0, _ := ret[0].(store.TransactionAgent)
return ret0
}
// Transaction indicates an expected call of Transaction
func (mr *MockDBMockRecorder) Transaction() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Transaction", reflect.TypeOf((*MockDB)(nil).Transaction))
}
// TransactionRequest mocks base method
func (m *MockDB) TransactionRequest() store.TransactionRequestAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TransactionRequest")
ret0, _ := ret[0].(store.TransactionRequestAgent)
return ret0
}
// TransactionRequest indicates an expected call of TransactionRequest
func (mr *MockDBMockRecorder) TransactionRequest() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionRequest", reflect.TypeOf((*MockDB)(nil).TransactionRequest))
}
// Account mocks base method
func (m *MockDB) Account() store.AccountAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Account")
ret0, _ := ret[0].(store.AccountAgent)
return ret0
}
// Account indicates an expected call of Account
func (mr *MockDBMockRecorder) Account() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Account", reflect.TypeOf((*MockDB)(nil).Account))
}
// Faucet mocks base method
func (m *MockDB) Faucet() store.FaucetAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Faucet")
ret0, _ := ret[0].(store.FaucetAgent)
return ret0
}
// Faucet indicates an expected call of Faucet
func (mr *MockDBMockRecorder) Faucet() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Faucet", reflect.TypeOf((*MockDB)(nil).Faucet))
}
// Artifact mocks base method
func (m *MockDB) Artifact() store.ArtifactAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Artifact")
ret0, _ := ret[0].(store.ArtifactAgent)
return ret0
}
// Artifact indicates an expected call of Artifact
func (mr *MockDBMockRecorder) Artifact() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Artifact", reflect.TypeOf((*MockDB)(nil).Artifact))
}
// CodeHash mocks base method
func (m *MockDB) CodeHash() store.CodeHashAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CodeHash")
ret0, _ := ret[0].(store.CodeHashAgent)
return ret0
}
// CodeHash indicates an expected call of CodeHash
func (mr *MockDBMockRecorder) CodeHash() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CodeHash", reflect.TypeOf((*MockDB)(nil).CodeHash))
}
// Event mocks base method
func (m *MockDB) Event() store.EventAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Event")
ret0, _ := ret[0].(store.EventAgent)
return ret0
}
// Event indicates an expected call of Event
func (mr *MockDBMockRecorder) Event() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockDB)(nil).Event))
}
// Method mocks base method
func (m *MockDB) Method() store.MethodAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Method")
ret0, _ := ret[0].(store.MethodAgent)
return ret0
}
// Method indicates an expected call of Method
func (mr *MockDBMockRecorder) Method() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Method", reflect.TypeOf((*MockDB)(nil).Method))
}
// Repository mocks base method
func (m *MockDB) Repository() store.RepositoryAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Repository")
ret0, _ := ret[0].(store.RepositoryAgent)
return ret0
}
// Repository indicates an expected call of Repository
func (mr *MockDBMockRecorder) Repository() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repository", reflect.TypeOf((*MockDB)(nil).Repository))
}
// Tag mocks base method
func (m *MockDB) Tag() store.TagAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tag")
ret0, _ := ret[0].(store.TagAgent)
return ret0
}
// Tag indicates an expected call of Tag
func (mr *MockDBMockRecorder) Tag() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", reflect.TypeOf((*MockDB)(nil).Tag))
}
// Chain mocks base method
func (m *MockDB) Chain() store.ChainAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Chain")
ret0, _ := ret[0].(store.ChainAgent)
return ret0
}
// Chain indicates an expected call of Chain
func (mr *MockDBMockRecorder) Chain() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chain", reflect.TypeOf((*MockDB)(nil).Chain))
}
// PrivateTxManager mocks base method
func (m *MockDB) PrivateTxManager() store.PrivateTxManagerAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrivateTxManager")
ret0, _ := ret[0].(store.PrivateTxManagerAgent)
return ret0
}
// PrivateTxManager indicates an expected call of PrivateTxManager
func (mr *MockDBMockRecorder) PrivateTxManager() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrivateTxManager", reflect.TypeOf((*MockDB)(nil).PrivateTxManager))
}
// MockTx is a mock of Tx interface
type MockTx struct {
ctrl *gomock.Controller
recorder *MockTxMockRecorder
}
// MockTxMockRecorder is the mock recorder for MockTx
type MockTxMockRecorder struct {
mock *MockTx
}
// NewMockTx creates a new mock instance
func NewMockTx(ctrl *gomock.Controller) *MockTx {
mock := &MockTx{ctrl: ctrl}
mock.recorder = &MockTxMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTx) EXPECT() *MockTxMockRecorder {
return m.recorder
}
// Begin mocks base method
func (m *MockTx) Begin() (database.Tx, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Begin")
ret0, _ := ret[0].(database.Tx)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Begin indicates an expected call of Begin
func (mr *MockTxMockRecorder) Begin() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Begin", reflect.TypeOf((*MockTx)(nil).Begin))
}
// Commit mocks base method
func (m *MockTx) Commit() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit")
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit
func (mr *MockTxMockRecorder) Commit() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockTx)(nil).Commit))
}
// Rollback mocks base method
func (m *MockTx) Rollback() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Rollback")
ret0, _ := ret[0].(error)
return ret0
}
// Rollback indicates an expected call of Rollback
func (mr *MockTxMockRecorder) Rollback() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rollback", reflect.TypeOf((*MockTx)(nil).Rollback))
}
// Close mocks base method
func (m *MockTx) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close
func (mr *MockTxMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockTx)(nil).Close))
}
// Schedule mocks base method
func (m *MockTx) Schedule() store.ScheduleAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Schedule")
ret0, _ := ret[0].(store.ScheduleAgent)
return ret0
}
// Schedule indicates an expected call of Schedule
func (mr *MockTxMockRecorder) Schedule() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Schedule", reflect.TypeOf((*MockTx)(nil).Schedule))
}
// Job mocks base method
func (m *MockTx) Job() store.JobAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Job")
ret0, _ := ret[0].(store.JobAgent)
return ret0
}
// Job indicates an expected call of Job
func (mr *MockTxMockRecorder) Job() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Job", reflect.TypeOf((*MockTx)(nil).Job))
}
// Log mocks base method
func (m *MockTx) Log() store.LogAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Log")
ret0, _ := ret[0].(store.LogAgent)
return ret0
}
// Log indicates an expected call of Log
func (mr *MockTxMockRecorder) Log() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Log", reflect.TypeOf((*MockTx)(nil).Log))
}
// Transaction mocks base method
func (m *MockTx) Transaction() store.TransactionAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Transaction")
ret0, _ := ret[0].(store.TransactionAgent)
return ret0
}
// Transaction indicates an expected call of Transaction
func (mr *MockTxMockRecorder) Transaction() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Transaction", reflect.TypeOf((*MockTx)(nil).Transaction))
}
// TransactionRequest mocks base method
func (m *MockTx) TransactionRequest() store.TransactionRequestAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TransactionRequest")
ret0, _ := ret[0].(store.TransactionRequestAgent)
return ret0
}
// TransactionRequest indicates an expected call of TransactionRequest
func (mr *MockTxMockRecorder) TransactionRequest() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionRequest", reflect.TypeOf((*MockTx)(nil).TransactionRequest))
}
// Account mocks base method
func (m *MockTx) Account() store.AccountAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Account")
ret0, _ := ret[0].(store.AccountAgent)
return ret0
}
// Account indicates an expected call of Account
func (mr *MockTxMockRecorder) Account() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Account", reflect.TypeOf((*MockTx)(nil).Account))
}
// Faucet mocks base method
func (m *MockTx) Faucet() store.FaucetAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Faucet")
ret0, _ := ret[0].(store.FaucetAgent)
return ret0
}
// Faucet indicates an expected call of Faucet
func (mr *MockTxMockRecorder) Faucet() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Faucet", reflect.TypeOf((*MockTx)(nil).Faucet))
}
// Artifact mocks base method
func (m *MockTx) Artifact() store.ArtifactAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Artifact")
ret0, _ := ret[0].(store.ArtifactAgent)
return ret0
}
// Artifact indicates an expected call of Artifact
func (mr *MockTxMockRecorder) Artifact() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Artifact", reflect.TypeOf((*MockTx)(nil).Artifact))
}
// CodeHash mocks base method
func (m *MockTx) CodeHash() store.CodeHashAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CodeHash")
ret0, _ := ret[0].(store.CodeHashAgent)
return ret0
}
// CodeHash indicates an expected call of CodeHash
func (mr *MockTxMockRecorder) CodeHash() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CodeHash", reflect.TypeOf((*MockTx)(nil).CodeHash))
}
// Event mocks base method
func (m *MockTx) Event() store.EventAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Event")
ret0, _ := ret[0].(store.EventAgent)
return ret0
}
// Event indicates an expected call of Event
func (mr *MockTxMockRecorder) Event() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockTx)(nil).Event))
}
// Method mocks base method
func (m *MockTx) Method() store.MethodAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Method")
ret0, _ := ret[0].(store.MethodAgent)
return ret0
}
// Method indicates an expected call of Method
func (mr *MockTxMockRecorder) Method() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Method", reflect.TypeOf((*MockTx)(nil).Method))
}
// Repository mocks base method
func (m *MockTx) Repository() store.RepositoryAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Repository")
ret0, _ := ret[0].(store.RepositoryAgent)
return ret0
}
// Repository indicates an expected call of Repository
func (mr *MockTxMockRecorder) Repository() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repository", reflect.TypeOf((*MockTx)(nil).Repository))
}
// Tag mocks base method
func (m *MockTx) Tag() store.TagAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Tag")
ret0, _ := ret[0].(store.TagAgent)
return ret0
}
// Tag indicates an expected call of Tag
func (mr *MockTxMockRecorder) Tag() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tag", reflect.TypeOf((*MockTx)(nil).Tag))
}
// Chain mocks base method
func (m *MockTx) Chain() store.ChainAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Chain")
ret0, _ := ret[0].(store.ChainAgent)
return ret0
}
// Chain indicates an expected call of Chain
func (mr *MockTxMockRecorder) Chain() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chain", reflect.TypeOf((*MockTx)(nil).Chain))
}
// PrivateTxManager mocks base method
func (m *MockTx) PrivateTxManager() store.PrivateTxManagerAgent {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrivateTxManager")
ret0, _ := ret[0].(store.PrivateTxManagerAgent)
return ret0
}
// PrivateTxManager indicates an expected call of PrivateTxManager
func (mr *MockTxMockRecorder) PrivateTxManager() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrivateTxManager", reflect.TypeOf((*MockTx)(nil).PrivateTxManager))
}
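// Hand-written usage sketch (added; not part of the mockgen output): tests
// typically wire MockTx so that its agent getters hand back the other mocks in
// this file. The *gomock.Controller is assumed to come from
// gomock.NewController(t) in the calling test.
func newStubbedTx(ctrl *gomock.Controller) *MockTx {
	tx := NewMockTx(ctrl)
	jobAgent := NewMockJobAgent(ctrl)
	// every tx.Job() call returns the job-agent mock ...
	tx.EXPECT().Job().Return(jobAgent).AnyTimes()
	// ... whose Insert accepts any context and job and reports success
	jobAgent.EXPECT().Insert(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
	return tx
}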
// MockTransactionRequestAgent is a mock of TransactionRequestAgent interface
type MockTransactionRequestAgent struct {
ctrl *gomock.Controller
recorder *MockTransactionRequestAgentMockRecorder
}
// MockTransactionRequestAgentMockRecorder is the mock recorder for MockTransactionRequestAgent
type MockTransactionRequestAgentMockRecorder struct {
mock *MockTransactionRequestAgent
}
// NewMockTransactionRequestAgent creates a new mock instance
func NewMockTransactionRequestAgent(ctrl *gomock.Controller) *MockTransactionRequestAgent {
mock := &MockTransactionRequestAgent{ctrl: ctrl}
mock.recorder = &MockTransactionRequestAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTransactionRequestAgent) EXPECT() *MockTransactionRequestAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockTransactionRequestAgent) Insert(ctx context.Context, txRequest *models.TransactionRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, txRequest)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockTransactionRequestAgentMockRecorder) Insert(ctx, txRequest interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockTransactionRequestAgent)(nil).Insert), ctx, txRequest)
}
// FindOneByIdempotencyKey mocks base method
func (m *MockTransactionRequestAgent) FindOneByIdempotencyKey(ctx context.Context, idempotencyKey, tenantID, ownerID string) (*models.TransactionRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByIdempotencyKey", ctx, idempotencyKey, tenantID, ownerID)
ret0, _ := ret[0].(*models.TransactionRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByIdempotencyKey indicates an expected call of FindOneByIdempotencyKey
func (mr *MockTransactionRequestAgentMockRecorder) FindOneByIdempotencyKey(ctx, idempotencyKey, tenantID, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByIdempotencyKey", reflect.TypeOf((*MockTransactionRequestAgent)(nil).FindOneByIdempotencyKey), ctx, idempotencyKey, tenantID, ownerID)
}
// FindOneByUUID mocks base method
func (m *MockTransactionRequestAgent) FindOneByUUID(ctx context.Context, scheduleUUID string, tenants []string, ownerID string) (*models.TransactionRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByUUID", ctx, scheduleUUID, tenants, ownerID)
ret0, _ := ret[0].(*models.TransactionRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByUUID indicates an expected call of FindOneByUUID
func (mr *MockTransactionRequestAgentMockRecorder) FindOneByUUID(ctx, scheduleUUID, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByUUID", reflect.TypeOf((*MockTransactionRequestAgent)(nil).FindOneByUUID), ctx, scheduleUUID, tenants, ownerID)
}
// Search mocks base method
func (m *MockTransactionRequestAgent) Search(ctx context.Context, filters *entities.TransactionRequestFilters, tenants []string, ownerID string) ([]*models.TransactionRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, filters, tenants, ownerID)
ret0, _ := ret[0].([]*models.TransactionRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockTransactionRequestAgentMockRecorder) Search(ctx, filters, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockTransactionRequestAgent)(nil).Search), ctx, filters, tenants, ownerID)
}
// MockScheduleAgent is a mock of ScheduleAgent interface
type MockScheduleAgent struct {
ctrl *gomock.Controller
recorder *MockScheduleAgentMockRecorder
}
// MockScheduleAgentMockRecorder is the mock recorder for MockScheduleAgent
type MockScheduleAgentMockRecorder struct {
mock *MockScheduleAgent
}
// NewMockScheduleAgent creates a new mock instance
func NewMockScheduleAgent(ctrl *gomock.Controller) *MockScheduleAgent {
mock := &MockScheduleAgent{ctrl: ctrl}
mock.recorder = &MockScheduleAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockScheduleAgent) EXPECT() *MockScheduleAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockScheduleAgent) Insert(ctx context.Context, schedule *models.Schedule) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, schedule)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockScheduleAgentMockRecorder) Insert(ctx, schedule interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockScheduleAgent)(nil).Insert), ctx, schedule)
}
// FindOneByUUID mocks base method
func (m *MockScheduleAgent) FindOneByUUID(ctx context.Context, uuid string, tenants []string, ownerID string) (*models.Schedule, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByUUID", ctx, uuid, tenants, ownerID)
ret0, _ := ret[0].(*models.Schedule)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByUUID indicates an expected call of FindOneByUUID
func (mr *MockScheduleAgentMockRecorder) FindOneByUUID(ctx, uuid, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByUUID", reflect.TypeOf((*MockScheduleAgent)(nil).FindOneByUUID), ctx, uuid, tenants, ownerID)
}
// FindAll mocks base method
func (m *MockScheduleAgent) FindAll(ctx context.Context, tenants []string, ownerID string) ([]*models.Schedule, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindAll", ctx, tenants, ownerID)
ret0, _ := ret[0].([]*models.Schedule)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindAll indicates an expected call of FindAll
func (mr *MockScheduleAgentMockRecorder) FindAll(ctx, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindAll", reflect.TypeOf((*MockScheduleAgent)(nil).FindAll), ctx, tenants, ownerID)
}
// MockJobAgent is a mock of JobAgent interface
type MockJobAgent struct {
ctrl *gomock.Controller
recorder *MockJobAgentMockRecorder
}
// MockJobAgentMockRecorder is the mock recorder for MockJobAgent
type MockJobAgentMockRecorder struct {
mock *MockJobAgent
}
// NewMockJobAgent creates a new mock instance
func NewMockJobAgent(ctrl *gomock.Controller) *MockJobAgent {
mock := &MockJobAgent{ctrl: ctrl}
mock.recorder = &MockJobAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockJobAgent) EXPECT() *MockJobAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockJobAgent) Insert(ctx context.Context, job *models.Job) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, job)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockJobAgentMockRecorder) Insert(ctx, job interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockJobAgent)(nil).Insert), ctx, job)
}
// Update mocks base method
func (m *MockJobAgent) Update(ctx context.Context, job *models.Job) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, job)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockJobAgentMockRecorder) Update(ctx, job interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockJobAgent)(nil).Update), ctx, job)
}
// FindOneByUUID mocks base method
func (m *MockJobAgent) FindOneByUUID(ctx context.Context, uuid string, tenants []string, ownerID string, withLogs bool) (*models.Job, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByUUID", ctx, uuid, tenants, ownerID, withLogs)
ret0, _ := ret[0].(*models.Job)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByUUID indicates an expected call of FindOneByUUID
func (mr *MockJobAgentMockRecorder) FindOneByUUID(ctx, uuid, tenants, ownerID, withLogs interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByUUID", reflect.TypeOf((*MockJobAgent)(nil).FindOneByUUID), ctx, uuid, tenants, ownerID, withLogs)
}
// LockOneByUUID mocks base method
func (m *MockJobAgent) LockOneByUUID(ctx context.Context, uuid string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LockOneByUUID", ctx, uuid)
ret0, _ := ret[0].(error)
return ret0
}
// LockOneByUUID indicates an expected call of LockOneByUUID
func (mr *MockJobAgentMockRecorder) LockOneByUUID(ctx, uuid interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockOneByUUID", reflect.TypeOf((*MockJobAgent)(nil).LockOneByUUID), ctx, uuid)
}
// Search mocks base method
func (m *MockJobAgent) Search(ctx context.Context, filters *entities.JobFilters, tenants []string, ownerID string) ([]*models.Job, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, filters, tenants, ownerID)
ret0, _ := ret[0].([]*models.Job)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockJobAgentMockRecorder) Search(ctx, filters, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockJobAgent)(nil).Search), ctx, filters, tenants, ownerID)
}
// MockLogAgent is a mock of LogAgent interface
type MockLogAgent struct {
ctrl *gomock.Controller
recorder *MockLogAgentMockRecorder
}
// MockLogAgentMockRecorder is the mock recorder for MockLogAgent
type MockLogAgentMockRecorder struct {
mock *MockLogAgent
}
// NewMockLogAgent creates a new mock instance
func NewMockLogAgent(ctrl *gomock.Controller) *MockLogAgent {
mock := &MockLogAgent{ctrl: ctrl}
mock.recorder = &MockLogAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockLogAgent) EXPECT() *MockLogAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockLogAgent) Insert(ctx context.Context, log *models.Log) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, log)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockLogAgentMockRecorder) Insert(ctx, log interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockLogAgent)(nil).Insert), ctx, log)
}
// MockTransactionAgent is a mock of TransactionAgent interface
type MockTransactionAgent struct {
ctrl *gomock.Controller
recorder *MockTransactionAgentMockRecorder
}
// MockTransactionAgentMockRecorder is the mock recorder for MockTransactionAgent
type MockTransactionAgentMockRecorder struct {
mock *MockTransactionAgent
}
// NewMockTransactionAgent creates a new mock instance
func NewMockTransactionAgent(ctrl *gomock.Controller) *MockTransactionAgent {
mock := &MockTransactionAgent{ctrl: ctrl}
mock.recorder = &MockTransactionAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTransactionAgent) EXPECT() *MockTransactionAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockTransactionAgent) Insert(ctx context.Context, tx *models.Transaction) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, tx)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockTransactionAgentMockRecorder) Insert(ctx, tx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockTransactionAgent)(nil).Insert), ctx, tx)
}
// Update mocks base method
func (m *MockTransactionAgent) Update(ctx context.Context, tx *models.Transaction) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, tx)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockTransactionAgentMockRecorder) Update(ctx, tx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockTransactionAgent)(nil).Update), ctx, tx)
}
// MockAccountAgent is a mock of AccountAgent interface
type MockAccountAgent struct {
ctrl *gomock.Controller
recorder *MockAccountAgentMockRecorder
}
// MockAccountAgentMockRecorder is the mock recorder for MockAccountAgent
type MockAccountAgentMockRecorder struct {
mock *MockAccountAgent
}
// NewMockAccountAgent creates a new mock instance
func NewMockAccountAgent(ctrl *gomock.Controller) *MockAccountAgent {
mock := &MockAccountAgent{ctrl: ctrl}
mock.recorder = &MockAccountAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockAccountAgent) EXPECT() *MockAccountAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockAccountAgent) Insert(ctx context.Context, account *models.Account) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, account)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockAccountAgentMockRecorder) Insert(ctx, account interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockAccountAgent)(nil).Insert), ctx, account)
}
// Update mocks base method
func (m *MockAccountAgent) Update(ctx context.Context, account *models.Account) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, account)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockAccountAgentMockRecorder) Update(ctx, account interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockAccountAgent)(nil).Update), ctx, account)
}
// FindOneByAddress mocks base method
func (m *MockAccountAgent) FindOneByAddress(ctx context.Context, address string, tenants []string, ownerID string) (*models.Account, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByAddress", ctx, address, tenants, ownerID)
ret0, _ := ret[0].(*models.Account)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByAddress indicates an expected call of FindOneByAddress
func (mr *MockAccountAgentMockRecorder) FindOneByAddress(ctx, address, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByAddress", reflect.TypeOf((*MockAccountAgent)(nil).FindOneByAddress), ctx, address, tenants, ownerID)
}
// Search mocks base method
func (m *MockAccountAgent) Search(ctx context.Context, filters *entities.AccountFilters, tenants []string, ownerID string) ([]*models.Account, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, filters, tenants, ownerID)
ret0, _ := ret[0].([]*models.Account)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockAccountAgentMockRecorder) Search(ctx, filters, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockAccountAgent)(nil).Search), ctx, filters, tenants, ownerID)
}
// MockFaucetAgent is a mock of FaucetAgent interface
type MockFaucetAgent struct {
ctrl *gomock.Controller
recorder *MockFaucetAgentMockRecorder
}
// MockFaucetAgentMockRecorder is the mock recorder for MockFaucetAgent
type MockFaucetAgentMockRecorder struct {
mock *MockFaucetAgent
}
// NewMockFaucetAgent creates a new mock instance
func NewMockFaucetAgent(ctrl *gomock.Controller) *MockFaucetAgent {
mock := &MockFaucetAgent{ctrl: ctrl}
mock.recorder = &MockFaucetAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockFaucetAgent) EXPECT() *MockFaucetAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockFaucetAgent) Insert(ctx context.Context, faucet *models.Faucet) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, faucet)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockFaucetAgentMockRecorder) Insert(ctx, faucet interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockFaucetAgent)(nil).Insert), ctx, faucet)
}
// Update mocks base method
func (m *MockFaucetAgent) Update(ctx context.Context, faucet *models.Faucet, tenants []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, faucet, tenants)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockFaucetAgentMockRecorder) Update(ctx, faucet, tenants interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockFaucetAgent)(nil).Update), ctx, faucet, tenants)
}
// FindOneByUUID mocks base method
func (m *MockFaucetAgent) FindOneByUUID(ctx context.Context, uuid string, tenants []string) (*models.Faucet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByUUID", ctx, uuid, tenants)
ret0, _ := ret[0].(*models.Faucet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByUUID indicates an expected call of FindOneByUUID
func (mr *MockFaucetAgentMockRecorder) FindOneByUUID(ctx, uuid, tenants interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByUUID", reflect.TypeOf((*MockFaucetAgent)(nil).FindOneByUUID), ctx, uuid, tenants)
}
// Search mocks base method
func (m *MockFaucetAgent) Search(ctx context.Context, filters *entities.FaucetFilters, tenants []string) ([]*models.Faucet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, filters, tenants)
ret0, _ := ret[0].([]*models.Faucet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockFaucetAgentMockRecorder) Search(ctx, filters, tenants interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockFaucetAgent)(nil).Search), ctx, filters, tenants)
}
// Delete mocks base method
func (m *MockFaucetAgent) Delete(ctx context.Context, faucet *models.Faucet, tenants []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, faucet, tenants)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockFaucetAgentMockRecorder) Delete(ctx, faucet, tenants interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockFaucetAgent)(nil).Delete), ctx, faucet, tenants)
}
// MockChainAgent is a mock of ChainAgent interface
type MockChainAgent struct {
ctrl *gomock.Controller
recorder *MockChainAgentMockRecorder
}
// MockChainAgentMockRecorder is the mock recorder for MockChainAgent
type MockChainAgentMockRecorder struct {
mock *MockChainAgent
}
// NewMockChainAgent creates a new mock instance
func NewMockChainAgent(ctrl *gomock.Controller) *MockChainAgent {
mock := &MockChainAgent{ctrl: ctrl}
mock.recorder = &MockChainAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockChainAgent) EXPECT() *MockChainAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockChainAgent) Insert(ctx context.Context, chain *models.Chain) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, chain)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockChainAgentMockRecorder) Insert(ctx, chain interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockChainAgent)(nil).Insert), ctx, chain)
}
// Update mocks base method
func (m *MockChainAgent) Update(ctx context.Context, chain *models.Chain, tenants []string, ownerID string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, chain, tenants, ownerID)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockChainAgentMockRecorder) Update(ctx, chain, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockChainAgent)(nil).Update), ctx, chain, tenants, ownerID)
}
// Search mocks base method
func (m *MockChainAgent) Search(ctx context.Context, filters *entities.ChainFilters, tenants []string, ownerID string) ([]*models.Chain, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, filters, tenants, ownerID)
ret0, _ := ret[0].([]*models.Chain)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockChainAgentMockRecorder) Search(ctx, filters, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockChainAgent)(nil).Search), ctx, filters, tenants, ownerID)
}
// FindOneByUUID mocks base method
func (m *MockChainAgent) FindOneByUUID(ctx context.Context, uuid string, tenants []string, ownerID string) (*models.Chain, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByUUID", ctx, uuid, tenants, ownerID)
ret0, _ := ret[0].(*models.Chain)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByUUID indicates an expected call of FindOneByUUID
func (mr *MockChainAgentMockRecorder) FindOneByUUID(ctx, uuid, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByUUID", reflect.TypeOf((*MockChainAgent)(nil).FindOneByUUID), ctx, uuid, tenants, ownerID)
}
// FindOneByName mocks base method
func (m *MockChainAgent) FindOneByName(ctx context.Context, name string, tenants []string, ownerID string) (*models.Chain, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByName", ctx, name, tenants, ownerID)
ret0, _ := ret[0].(*models.Chain)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByName indicates an expected call of FindOneByName
func (mr *MockChainAgentMockRecorder) FindOneByName(ctx, name, tenants, ownerID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByName", reflect.TypeOf((*MockChainAgent)(nil).FindOneByName), ctx, name, tenants, ownerID)
}
// Delete mocks base method
func (m *MockChainAgent) Delete(ctx context.Context, chain *models.Chain, tenants []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, chain, tenants)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockChainAgentMockRecorder) Delete(ctx, chain, tenants interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockChainAgent)(nil).Delete), ctx, chain, tenants)
}
// MockPrivateTxManagerAgent is a mock of PrivateTxManagerAgent interface
type MockPrivateTxManagerAgent struct {
ctrl *gomock.Controller
recorder *MockPrivateTxManagerAgentMockRecorder
}
// MockPrivateTxManagerAgentMockRecorder is the mock recorder for MockPrivateTxManagerAgent
type MockPrivateTxManagerAgentMockRecorder struct {
mock *MockPrivateTxManagerAgent
}
// NewMockPrivateTxManagerAgent creates a new mock instance
func NewMockPrivateTxManagerAgent(ctrl *gomock.Controller) *MockPrivateTxManagerAgent {
mock := &MockPrivateTxManagerAgent{ctrl: ctrl}
mock.recorder = &MockPrivateTxManagerAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockPrivateTxManagerAgent) EXPECT() *MockPrivateTxManagerAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockPrivateTxManagerAgent) Insert(ctx context.Context, privateTxManager *models.PrivateTxManager) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, privateTxManager)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockPrivateTxManagerAgentMockRecorder) Insert(ctx, privateTxManager interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockPrivateTxManagerAgent)(nil).Insert), ctx, privateTxManager)
}
// Update mocks base method
func (m *MockPrivateTxManagerAgent) Update(ctx context.Context, privateTxManager *models.PrivateTxManager) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", ctx, privateTxManager)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update
func (mr *MockPrivateTxManagerAgentMockRecorder) Update(ctx, privateTxManager interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockPrivateTxManagerAgent)(nil).Update), ctx, privateTxManager)
}
// Search mocks base method
func (m *MockPrivateTxManagerAgent) Search(ctx context.Context, chainUUID string) ([]*models.PrivateTxManager, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Search", ctx, chainUUID)
ret0, _ := ret[0].([]*models.PrivateTxManager)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Search indicates an expected call of Search
func (mr *MockPrivateTxManagerAgentMockRecorder) Search(ctx, chainUUID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Search", reflect.TypeOf((*MockPrivateTxManagerAgent)(nil).Search), ctx, chainUUID)
}
// Delete mocks base method
func (m *MockPrivateTxManagerAgent) Delete(ctx context.Context, privateTxManager *models.PrivateTxManager) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, privateTxManager)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockPrivateTxManagerAgentMockRecorder) Delete(ctx, privateTxManager interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockPrivateTxManagerAgent)(nil).Delete), ctx, privateTxManager)
}
// MockArtifactAgent is a mock of ArtifactAgent interface
type MockArtifactAgent struct {
ctrl *gomock.Controller
recorder *MockArtifactAgentMockRecorder
}
// MockArtifactAgentMockRecorder is the mock recorder for MockArtifactAgent
type MockArtifactAgentMockRecorder struct {
mock *MockArtifactAgent
}
// NewMockArtifactAgent creates a new mock instance
func NewMockArtifactAgent(ctrl *gomock.Controller) *MockArtifactAgent {
mock := &MockArtifactAgent{ctrl: ctrl}
mock.recorder = &MockArtifactAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockArtifactAgent) EXPECT() *MockArtifactAgentMockRecorder {
return m.recorder
}
// FindOneByABIAndCodeHash mocks base method
func (m *MockArtifactAgent) FindOneByABIAndCodeHash(ctx context.Context, abi, codeHash string) (*models.ArtifactModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByABIAndCodeHash", ctx, abi, codeHash)
ret0, _ := ret[0].(*models.ArtifactModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByABIAndCodeHash indicates an expected call of FindOneByABIAndCodeHash
func (mr *MockArtifactAgentMockRecorder) FindOneByABIAndCodeHash(ctx, abi, codeHash interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByABIAndCodeHash", reflect.TypeOf((*MockArtifactAgent)(nil).FindOneByABIAndCodeHash), ctx, abi, codeHash)
}
// SelectOrInsert mocks base method
func (m *MockArtifactAgent) SelectOrInsert(ctx context.Context, artifact *models.ArtifactModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SelectOrInsert", ctx, artifact)
ret0, _ := ret[0].(error)
return ret0
}
// SelectOrInsert indicates an expected call of SelectOrInsert
func (mr *MockArtifactAgentMockRecorder) SelectOrInsert(ctx, artifact interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectOrInsert", reflect.TypeOf((*MockArtifactAgent)(nil).SelectOrInsert), ctx, artifact)
}
// Insert mocks base method
func (m *MockArtifactAgent) Insert(ctx context.Context, artifact *models.ArtifactModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, artifact)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockArtifactAgentMockRecorder) Insert(ctx, artifact interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockArtifactAgent)(nil).Insert), ctx, artifact)
}
// FindOneByNameAndTag mocks base method
func (m *MockArtifactAgent) FindOneByNameAndTag(ctx context.Context, name, tag string) (*models.ArtifactModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByNameAndTag", ctx, name, tag)
ret0, _ := ret[0].(*models.ArtifactModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByNameAndTag indicates an expected call of FindOneByNameAndTag
func (mr *MockArtifactAgentMockRecorder) FindOneByNameAndTag(ctx, name, tag interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByNameAndTag", reflect.TypeOf((*MockArtifactAgent)(nil).FindOneByNameAndTag), ctx, name, tag)
}
// MockCodeHashAgent is a mock of CodeHashAgent interface
type MockCodeHashAgent struct {
ctrl *gomock.Controller
recorder *MockCodeHashAgentMockRecorder
}
// MockCodeHashAgentMockRecorder is the mock recorder for MockCodeHashAgent
type MockCodeHashAgentMockRecorder struct {
mock *MockCodeHashAgent
}
// NewMockCodeHashAgent creates a new mock instance
func NewMockCodeHashAgent(ctrl *gomock.Controller) *MockCodeHashAgent {
mock := &MockCodeHashAgent{ctrl: ctrl}
mock.recorder = &MockCodeHashAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockCodeHashAgent) EXPECT() *MockCodeHashAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockCodeHashAgent) Insert(ctx context.Context, codehash *models.CodehashModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, codehash)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockCodeHashAgentMockRecorder) Insert(ctx, codehash interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockCodeHashAgent)(nil).Insert), ctx, codehash)
}
// MockEventAgent is a mock of EventAgent interface
type MockEventAgent struct {
ctrl *gomock.Controller
recorder *MockEventAgentMockRecorder
}
// MockEventAgentMockRecorder is the mock recorder for MockEventAgent
type MockEventAgentMockRecorder struct {
mock *MockEventAgent
}
// NewMockEventAgent creates a new mock instance
func NewMockEventAgent(ctrl *gomock.Controller) *MockEventAgent {
mock := &MockEventAgent{ctrl: ctrl}
mock.recorder = &MockEventAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockEventAgent) EXPECT() *MockEventAgentMockRecorder {
return m.recorder
}
// InsertMultiple mocks base method
func (m *MockEventAgent) InsertMultiple(ctx context.Context, events []*models.EventModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertMultiple", ctx, events)
ret0, _ := ret[0].(error)
return ret0
}
// InsertMultiple indicates an expected call of InsertMultiple
func (mr *MockEventAgentMockRecorder) InsertMultiple(ctx, events interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertMultiple", reflect.TypeOf((*MockEventAgent)(nil).InsertMultiple), ctx, events)
}
// FindOneByAccountAndSigHash mocks base method
func (m *MockEventAgent) FindOneByAccountAndSigHash(ctx context.Context, chainID, address, sighash string, indexedInputCount uint32) (*models.EventModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByAccountAndSigHash", ctx, chainID, address, sighash, indexedInputCount)
ret0, _ := ret[0].(*models.EventModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByAccountAndSigHash indicates an expected call of FindOneByAccountAndSigHash
func (mr *MockEventAgentMockRecorder) FindOneByAccountAndSigHash(ctx, chainID, address, sighash, indexedInputCount interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByAccountAndSigHash", reflect.TypeOf((*MockEventAgent)(nil).FindOneByAccountAndSigHash), ctx, chainID, address, sighash, indexedInputCount)
}
// FindDefaultBySigHash mocks base method
func (m *MockEventAgent) FindDefaultBySigHash(ctx context.Context, sighash string, indexedInputCount uint32) ([]*models.EventModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindDefaultBySigHash", ctx, sighash, indexedInputCount)
ret0, _ := ret[0].([]*models.EventModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindDefaultBySigHash indicates an expected call of FindDefaultBySigHash
func (mr *MockEventAgentMockRecorder) FindDefaultBySigHash(ctx, sighash, indexedInputCount interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindDefaultBySigHash", reflect.TypeOf((*MockEventAgent)(nil).FindDefaultBySigHash), ctx, sighash, indexedInputCount)
}
// MockMethodAgent is a mock of MethodAgent interface
type MockMethodAgent struct {
ctrl *gomock.Controller
recorder *MockMethodAgentMockRecorder
}
// MockMethodAgentMockRecorder is the mock recorder for MockMethodAgent
type MockMethodAgentMockRecorder struct {
mock *MockMethodAgent
}
// NewMockMethodAgent creates a new mock instance
func NewMockMethodAgent(ctrl *gomock.Controller) *MockMethodAgent {
mock := &MockMethodAgent{ctrl: ctrl}
mock.recorder = &MockMethodAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockMethodAgent) EXPECT() *MockMethodAgentMockRecorder {
return m.recorder
}
// InsertMultiple mocks base method
func (m *MockMethodAgent) InsertMultiple(ctx context.Context, methods []*models.MethodModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertMultiple", ctx, methods)
ret0, _ := ret[0].(error)
return ret0
}
// InsertMultiple indicates an expected call of InsertMultiple
func (mr *MockMethodAgentMockRecorder) InsertMultiple(ctx, methods interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertMultiple", reflect.TypeOf((*MockMethodAgent)(nil).InsertMultiple), ctx, methods)
}
// FindOneByAccountAndSelector mocks base method
func (m *MockMethodAgent) FindOneByAccountAndSelector(ctx context.Context, chainID, address string, selector []byte) (*models.MethodModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOneByAccountAndSelector", ctx, chainID, address, selector)
ret0, _ := ret[0].(*models.MethodModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOneByAccountAndSelector indicates an expected call of FindOneByAccountAndSelector
func (mr *MockMethodAgentMockRecorder) FindOneByAccountAndSelector(ctx, chainID, address, selector interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOneByAccountAndSelector", reflect.TypeOf((*MockMethodAgent)(nil).FindOneByAccountAndSelector), ctx, chainID, address, selector)
}
// FindDefaultBySelector mocks base method
func (m *MockMethodAgent) FindDefaultBySelector(ctx context.Context, selector []byte) ([]*models.MethodModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindDefaultBySelector", ctx, selector)
ret0, _ := ret[0].([]*models.MethodModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindDefaultBySelector indicates an expected call of FindDefaultBySelector
func (mr *MockMethodAgentMockRecorder) FindDefaultBySelector(ctx, selector interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindDefaultBySelector", reflect.TypeOf((*MockMethodAgent)(nil).FindDefaultBySelector), ctx, selector)
}
// MockRepositoryAgent is a mock of RepositoryAgent interface
type MockRepositoryAgent struct {
ctrl *gomock.Controller
recorder *MockRepositoryAgentMockRecorder
}
// MockRepositoryAgentMockRecorder is the mock recorder for MockRepositoryAgent
type MockRepositoryAgentMockRecorder struct {
mock *MockRepositoryAgent
}
// NewMockRepositoryAgent creates a new mock instance
func NewMockRepositoryAgent(ctrl *gomock.Controller) *MockRepositoryAgent {
mock := &MockRepositoryAgent{ctrl: ctrl}
mock.recorder = &MockRepositoryAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockRepositoryAgent) EXPECT() *MockRepositoryAgentMockRecorder {
return m.recorder
}
// SelectOrInsert mocks base method
func (m *MockRepositoryAgent) SelectOrInsert(ctx context.Context, repository *models.RepositoryModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SelectOrInsert", ctx, repository)
ret0, _ := ret[0].(error)
return ret0
}
// SelectOrInsert indicates an expected call of SelectOrInsert
func (mr *MockRepositoryAgentMockRecorder) SelectOrInsert(ctx, repository interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectOrInsert", reflect.TypeOf((*MockRepositoryAgent)(nil).SelectOrInsert), ctx, repository)
}
// Insert mocks base method
func (m *MockRepositoryAgent) Insert(ctx context.Context, repository *models.RepositoryModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, repository)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockRepositoryAgentMockRecorder) Insert(ctx, repository interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockRepositoryAgent)(nil).Insert), ctx, repository)
}
// FindOne mocks base method
func (m *MockRepositoryAgent) FindOne(ctx context.Context, name string) (*models.RepositoryModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindOne", ctx, name)
ret0, _ := ret[0].(*models.RepositoryModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindOne indicates an expected call of FindOne
func (mr *MockRepositoryAgentMockRecorder) FindOne(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOne", reflect.TypeOf((*MockRepositoryAgent)(nil).FindOne), ctx, name)
}
// FindAll mocks base method
func (m *MockRepositoryAgent) FindAll(ctx context.Context) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindAll", ctx)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindAll indicates an expected call of FindAll
func (mr *MockRepositoryAgentMockRecorder) FindAll(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindAll", reflect.TypeOf((*MockRepositoryAgent)(nil).FindAll), ctx)
}
// MockTagAgent is a mock of TagAgent interface
type MockTagAgent struct {
ctrl *gomock.Controller
recorder *MockTagAgentMockRecorder
}
// MockTagAgentMockRecorder is the mock recorder for MockTagAgent
type MockTagAgentMockRecorder struct {
mock *MockTagAgent
}
// NewMockTagAgent creates a new mock instance
func NewMockTagAgent(ctrl *gomock.Controller) *MockTagAgent {
mock := &MockTagAgent{ctrl: ctrl}
mock.recorder = &MockTagAgentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTagAgent) EXPECT() *MockTagAgentMockRecorder {
return m.recorder
}
// Insert mocks base method
func (m *MockTagAgent) Insert(ctx context.Context, tag *models.TagModel) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", ctx, tag)
ret0, _ := ret[0].(error)
return ret0
}
// Insert indicates an expected call of Insert
func (mr *MockTagAgentMockRecorder) Insert(ctx, tag interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Insert", reflect.TypeOf((*MockTagAgent)(nil).Insert), ctx, tag)
}
// FindAllByName mocks base method
func (m *MockTagAgent) FindAllByName(ctx context.Context, name string) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindAllByName", ctx, name)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// FindAllByName indicates an expected call of FindAllByName
func (mr *MockTagAgentMockRecorder) FindAllByName(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindAllByName", reflect.TypeOf((*MockTagAgent)(nil).FindAllByName), ctx, name)
}
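// End of generated code. Hand-written usage sketch (added; a typical
// expectation against one of these mocks inside a test — t, "uuid", "tenant"
// and "owner" are placeholders):
//
//	ctrl := gomock.NewController(t)
//	defer ctrl.Finish()
//	chains := NewMockChainAgent(ctrl)
//	chains.EXPECT().
//		FindOneByUUID(gomock.Any(), "uuid", []string{"tenant"}, "owner").
//		Return(&models.Chain{}, nil).
//		Times(1)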
pkg/crc/preflight/preflight_checks_windows.go
package preflight

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/code-ready/crc/pkg/crc/errors"
	"github.com/code-ready/crc/pkg/crc/logging"
	"github.com/code-ready/crc/pkg/crc/machine/hyperv"
	winnet "github.com/code-ready/crc/pkg/os/windows/network"
	"github.com/code-ready/crc/pkg/os/windows/powershell"
)

const (
	// The Fall Creators update comes with the "Default Switch"
	minimumWindowsReleaseId = 1709
)
func checkVersionOfWindowsUpdate() error {
	windowsReleaseId := `(Get-ItemProperty -Path "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion" -Name ReleaseId).ReleaseId`

	stdOut, _, err := powershell.Execute(windowsReleaseId)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Failed to get Windows release id")
	}
	yourWindowsReleaseId, err := strconv.Atoi(strings.TrimSpace(stdOut))
	if err != nil {
		logging.Debug(err.Error())
		return errors.Newf("Failed to parse Windows release id: %s", stdOut)
	}
	if yourWindowsReleaseId < minimumWindowsReleaseId {
		return errors.Newf("Please update Windows. Currently %d is the minimum release needed to run. You are running %d", minimumWindowsReleaseId, yourWindowsReleaseId)
	}
	return nil
}
func checkHyperVInstalled() error {
	// Check whether a hypervisor is present: if Hyper-V is installed and
	// enabled, Win32_ComputerSystem reports HypervisorPresent as True.
	checkHypervisorPresent := `@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent`
	stdOut, _, err := powershell.Execute(checkHypervisorPresent)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Failed checking if Hyper-V is installed")
	}
	if !strings.Contains(stdOut, "True") {
		return errors.New("Hyper-V not installed")
	}

	// Check that the Hyper-V Virtual Machine Management service (vmms) exists.
	checkVmmsExists := `@(Get-Service vmms).Status`
	_, stdErr, err := powershell.Execute(checkVmmsExists)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Failed checking if Hyper-V management service exists")
	}
	if strings.Contains(stdErr, "Get-Service") {
		return errors.New("Hyper-V management service not available")
	}
	return nil
}
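// Hand-written sketch (added; relies only on powershell.Execute's contract as
// used above — it returns stdout, stderr and an error): the boolean PowerShell
// probes in this file all follow the same pattern, which could be factored as:
func psBool(cmd string) (bool, error) {
	stdOut, _, err := powershell.Execute(cmd)
	if err != nil {
		return false, err
	}
	return strings.Contains(stdOut, "True"), nil
}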
// fixHyperVInstalled enables the Hyper-V optional feature.
func fixHyperVInstalled() error {
	enableHyperVCommand := `Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All`
	_, _, err := powershell.ExecuteAsAdmin("enable Hyper-V", enableHyperVCommand)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Error occurred installing Hyper-V")
	}

	// The feature is enabled, but a restart might still be needed; since the
	// command's output cannot be redirected we cannot detect that, so log an
	// error asking for a reboot instead of failing.
	logging.Error("Please reboot your system")
	return nil
}
func checkHyperVServiceRunning() error {
	// Check if Hyper-V's Virtual Machine Management Service is running
	checkVmmsRunning := `@(Get-Service vmms).Status`
	stdOut, _, err := powershell.Execute(checkVmmsRunning)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Failed checking if Hyper-V is running")
	}
	if strings.TrimSpace(stdOut) != "Running" {
		return errors.New("Hyper-V Virtual Machine Management service not running")
	}
	return nil
}
func fixHyperVServiceRunning() error {
	enableVmmsService := `Set-Service -Name vmms -StartupType Automatic; Set-Service -Name vmms -Status Running -PassThru`
	_, _, err := powershell.ExecuteAsAdmin("enable Hyper-V service", enableVmmsService)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Error occurred enabling Hyper-V service")
	}
	return nil
}
func checkIfUserPartOfHyperVAdmins() error {
	// https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
	// BUILTIN\Hyper-V Administrators => S-1-5-32-578
	checkIfMemberOfHyperVAdmins :=
		`$sid = New-Object System.Security.Principal.SecurityIdentifier("S-1-5-32-578")
@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole($sid)`
	stdOut, _, err := powershell.Execute(checkIfMemberOfHyperVAdmins)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Failed checking if user is part of hyperv admins group")
	}
	if !strings.Contains(stdOut, "True") {
		return errors.New("User is not a member of the Hyper-V administrators group")
	}
	return nil
}
func fixUserPartOfHyperVAdmins() error {
	outGroupName, _, err := powershell.Execute(`(New-Object System.Security.Principal.SecurityIdentifier("S-1-5-32-578")).Translate([System.Security.Principal.NTAccount]).Value`)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Unable to get group name")
	}
	groupName := strings.TrimSpace(strings.Replace(strings.TrimSpace(outGroupName), "BUILTIN\\", "", -1))

	username := os.Getenv("USERNAME")
	netCmdArgs := fmt.Sprintf(`([adsi]"WinNT://./%s,group").Add("WinNT://%s,user")`, groupName, username)
	_, _, err = powershell.ExecuteAsAdmin("add user to hyperv admins group", netCmdArgs)
	if err != nil {
		logging.Debug(err.Error())
		return errors.New("Unable to add user to the Hyper-V Administrators group")
	}
	return nil
}
func checkIfHyperVVirtualSwitchExists() error {
	switchName := hyperv.AlternativeNetwork

	// Use the winnet helper to select the named switch, falling back to the Default Switch.
	exists, foundName := winnet.SelectSwitchByNameOrDefault(switchName)
	if exists {
		logging.Info("Found Virtual Switch to use: ", foundName)
		return nil
	}
	return errors.New("Virtual Switch not found")
}
func checkIfRunningAsNormalUser() error {
	if !powershell.IsAdmin() {
		return nil
	}
	logging.Debug("Ran as administrator")
	return fmt.Errorf("crc should be run as a normal user")
}
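// Illustrative aggregation sketch (added; crc's real preflight runner lives
// elsewhere and is not shown in this file): running the checks above in order
// and collecting every failure instead of stopping at the first one.
func runWindowsPreflightChecks() []error {
	checks := []func() error{
		checkVersionOfWindowsUpdate,
		checkHyperVInstalled,
		checkHyperVServiceRunning,
		checkIfUserPartOfHyperVAdmins,
		checkIfHyperVVirtualSwitchExists,
		checkIfRunningAsNormalUser,
	}
	var failures []error
	for _, check := range checks {
		if err := check(); err != nil {
			failures = append(failures, err)
		}
	}
	return failures
}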
numpy_mpi_numexpr/cfd_mpi_ne.py
#!/usr/bin/env python
#
# CFD Calculation with MPI4PY
# ===========================
#
# Simulation of inviscid flow in a 2D box using the Jacobi algorithm.
#
# Python version - uses numpy, numexpr and mpi4py.
#
# Alejandro Dinkelberg
#
import os
import sys
#import mkl
import time

# NUMEXPR_MAX_THREADS is read when numexpr is imported, so set it before the import.
os.environ['NUMEXPR_MAX_THREADS'] = '128'

import mpi4py.MPI as MPI
# Import numpy
import numpy as np
import numexpr as ne
from copy import deepcopy

ne.set_num_threads(2)
#mkl.set_num_threads(128)
#ne.set_vml_num_threads(128)
#ne.set_vml_accuracy_mode('fast')
##################################################################################################################################################################
# boundary and haloSWAP
def boundarypsi(psi, m, n, b, h, w, comm):
    # initialize the standard MPI values
    rank = comm.Get_rank()
    size = comm.Get_size()
    istart = m*rank + 1
    istop = istart + m - 1

    # BCs on bottom edge
    for i in range(b+1, b+w):
        if i >= istart and i <= istop:
            psi[i-istart+1][0] = i-b
    for i in range(b+w, m*size+1):
        if i >= istart and i <= istop:
            psi[i-istart+1][0] = w

    # BCs on RHS
    if rank == size-1:
        for j in range(1, h+1):
            psi[m+1][j] = w
        for j in range(h+1, h+w):
            psi[m+1][j] = w-j+h
def boundaryzet(zet, psi, m, n, comm):
    # initialize the standard MPI values
    rank = comm.Get_rank()
    size = comm.Get_size()
    istart = m*rank + 1
    istop = istart + m - 1

    # set top/bottom BCs:
    zet[1:m+1, 0] = 2 * (psi[1:m+1, 1] - psi[1:m+1, 0])
    zet[1:m+1, n+1] = 2 * (psi[1:m+1, n] - psi[1:m+1, n+1])

    # set left BCs
    if 0 == rank:
        zet[0, 1:n+1] = 2 * (psi[1, 1:n+1] - psi[0, 1:n+1])

    # set right BCs
    if size-1 == rank:
        zet[m+1, 1:n+1] = 2 * (psi[m, 1:n+1] - psi[m+1, 1:n+1])

    return zet
def haloSWAP(x, lm, n, comm):
    tag = 1
    status = MPI.Status()
    rank = comm.Get_rank()
    size = comm.Get_size()

    # no need to halo swap if serial
    if size > 1:
        # send right boundaries and receive left ones
        if rank == 0:
            comm.Send(x[lm][1:n+1], rank+1, tag)
        elif rank == size-1:
            comm.Recv(x[0][1:n+1], rank-1, tag, status)
        else:
            comm.Sendrecv(x[lm][1:n+1], rank+1, tag, x[0][1:n+1], rank-1, tag, status)
        # send left boundary and receive right
        if rank == 0:
            comm.Recv(x[lm+1][1:n+1], rank+1, tag, status)
        elif rank == size-1:
            comm.Send(x[1][1:n+1], rank-1, tag)
        else:
            comm.Sendrecv(x[1][1:n+1], rank-1, tag, x[lm+1][1:n+1], rank+1, tag, status)
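# Illustrative note (added): each rank owns lm = m // size interior rows plus
# two halo rows, so the rank-local arrays are (lm+2) x (n+2). For example, with
# m = n = 8 on 4 ranks, lm = 2 and each local psi has shape (4, 10); haloSWAP
# exchanges row 1 and row lm with the neighbouring ranks' halo rows.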
##################################################################################################################################################################
# util.py
def write_data(lm, n, scale, psi, velfile, colfile, comm):
    # mpi essentials
    m = lm
    rank = comm.Get_rank()
    size = comm.Get_size()

    # calculate velocities and colour values (hue2rgb)
    vel = np.zeros((m, n, 2))
    rgb = np.zeros((m, n, 3), dtype='i')
    #print(psi)
    for i in range(0, m-1):
        for j in range(0, n-1):
            vel[i][j][0] = (psi[i+1][j+2]-psi[i+1][j])/2.0
            vel[i][j][1] = -(psi[i+2][j+1]-psi[i][j+1])/2.0
            v1 = vel[i][j][0]
            v2 = vel[i][j][1]
            hue = (v1*v1 + v2*v2)**0.4  # modvsq**0.4
            rgb[i][j] = hue2rgb(hue)

    if 0 == rank:
        # Open the specified files
        velout = open(velfile, "w")
        #velout.write("{0} {1}\n".format(m/scale, n/scale))
        colout = open(colfile, "w")
        #colout.write("{0} {1}\n".format(m, n))
        for irank in range(0, size):
            # fixed: receive the whole arrays, skip the self-receive for
            # irank == 0 (rank 0's own block is already in rgb/vel), and use
            # i (not a shadowed irank) as the local row index
            if irank != 0:
                comm.Recv(rgb, source=irank, tag=1, status=MPI.Status())
                comm.Recv(vel, source=irank, tag=1, status=MPI.Status())
            for i in range(0, m):
                ix = irank*m + i + 1
                for j in range(0, n):
                    iy = j+1
                    colout.write(f'{ix} {iy} {rgb[i][j][0]:d} {rgb[i][j][1]:d} {rgb[i][j][2]:d}\n')
                    #print(((ix-1)%scale, int((scale-1)/2), (iy-1)%scale, int((scale-1)/2)))
                    scale_int = int((scale-1)/2)
                    if ((ix-1) % scale == scale_int) and (iy-1) % scale == scale_int:
                        velout.write(f'{ix} {iy} {vel[i][j][0]} {vel[i][j][1]}\n')
        velout.close()
        colout.close()
    else:
        comm.Send(rgb, dest=0, tag=1)
        comm.Send(vel, dest=0, tag=1)
def writeplotfile(m, n, scale):
    """
    Write the gnuplot script that renders the colour map and velocity field.
    """
    print('scalefactor', scale)
    with open('cfd.plt', 'w') as f:
        f.write('set size square\nset key off'
                '\nunset xtics\nunset ytics\n')
        f.write(f'set xrange[{1-scale}:{m+scale}]\nset yrange[{1-scale}:{n+scale}]\n')
        f.write(f"plot \"colourmap.dat\" w rgbimage, \"velocity.dat\" u 1:2:({scale}*0.75*$3/sqrt($3**2+$4**2)):({scale}*0.75*$4/sqrt($3**2+$4**2)) with vectors lc rgb \"#7F7F7F\"")

    print("\nWritten gnuplot script 'cfd.plt'\n")
def hue2rgb(hue):
    rgbmax = 255
    r = int(rgbmax*colfunc(hue-1.0))
    g = int(rgbmax*colfunc(hue-0.5))
    b = int(rgbmax*colfunc(hue))
    return int(r), int(g), int(b)

def colfunc(x):
    x1 = 0.2
    x2 = 0.5
    absx = abs(x)
    if absx > x2:
        return 0.0
    elif absx < x1:
        return 1.0
    else:
        return 1.0 - ((absx-x1)/(x2-x1))**2
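# Example (added): the colour map runs blue -> green -> red as hue rises, since
# colfunc is 1 for |x| < 0.2, 0 for |x| > 0.5, and quadratic in between:
#   hue2rgb(0.0) == (0, 0, 255)
#   hue2rgb(0.5) == (0, 255, 0)
#   hue2rgb(1.0) == (255, 0, 0)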
############################################################################################################################################
# jacobi.py
def jacobistep(psi, m, n):
    """
    Generates one step of the Jacobi iteration for the whole grid
    """
    #return 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2])
    return ne.evaluate("0.25 * (a + b + c + d)",
                       {'a': psi[0:m, 1:n+1], 'b': psi[2:m+2, 1:n+1],
                        'c': psi[1:m+1, 0:n], 'd': psi[1:m+1, 2:n+2]})

def jacobistepvort(zet, psi, m, n, re):
    #print(np.sum(zet), np.sum(psi))
    #psinew = 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2] - zet[1:m+1, 1:n+1])
    psinew = ne.evaluate("0.25 * (a + b + c + d - e)",
                         {'a': psi[0:m, 1:n+1], 'b': psi[2:m+2, 1:n+1],
                          'c': psi[1:m+1, 0:n], 'd': psi[1:m+1, 2:n+2],
                          'e': zet[1:m+1, 1:n+1]})
    #zetnew = - re/16.0 * ((psi[1:m+1, 2:n+2]-psi[1:m+1, 0:n])*(zet[2:m+2, 1:n+1]-zet[0:m, 1:n+1]) - (psi[2:m+2, 1:n+1]-psi[0:m, 1:n+1])*(zet[1:m+1, 2:n+2]-zet[1:m+1, 0:n])) + (0.25*(zet[0:m, 1:n+1]+zet[2:m+2, 1:n+1]+zet[1:m+1, 0:n]+zet[1:m+1, 2:n+2]))
    zetnew = ne.evaluate("- re / 16.0 * ((d - c) * (f - g) - (b - a) * (h - i)) + (0.25 * (f + g + h + i))",
                         {'re': re,
                          'a': psi[0:m, 1:n+1], 'b': psi[2:m+2, 1:n+1],
                          'c': psi[1:m+1, 0:n], 'd': psi[1:m+1, 2:n+2],
                          'f': zet[2:m+2, 1:n+1], 'g': zet[0:m, 1:n+1],
                          'h': zet[1:m+1, 2:n+2], 'i': zet[1:m+1, 0:n]})
    return psinew, zetnew

def deltasq(psi_os_zet_temp, oldarr, m, n):
    dsq = np.sum(np.power(psi_os_zet_temp - oldarr[1:m+1, 1:n+1], 2))
    return float(dsq)
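# Sanity sketch (added, not in the original script): check on a small random
# grid that the numexpr kernel in jacobistep matches the plain numpy stencil it
# replaced. _m, _n and _psi are local to this sketch.
def _check_jacobistep(_m=8, _n=8):
    _psi = np.random.rand(_m + 2, _n + 2)
    expected = 0.25 * (_psi[0:_m, 1:_n+1] + _psi[2:_m+2, 1:_n+1]
                       + _psi[1:_m+1, 0:_n] + _psi[1:_m+1, 2:_n+2])
    assert np.allclose(jacobistep(_psi, _m, _n), expected)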
##################################################################MAIN#################################################
# cfd_numpy.py MPI4PY MAIN-file
def main(argv):
# Test we have the correct number of arguments
if len(argv) < 2:
sys.stdout.write("Usage: cfd.py <scalefactor> <iterations> [reynolds]\n")
sys.exit(1)
# Get the systen parameters from the arguments
scalefactor = int(argv[0])
niter = int(argv[1])
# print interval
printfreq = 1000
# Set the minimum size parameters
bbase = 10
hbase = 15
wbase = 5
mbase = 32
nbase = 32
# Set the parameters for boundary conditions
b = bbase * scalefactor
h = hbase * scalefactor
w = wbase * scalefactor
# Set the dimensions of the array
m = mbase * scalefactor
n = nbase * scalefactor
# checkreynolds
checkerr = 0
    # tolerance for convergence; <= 0 means do not check
tolerance = 0
#parallelisation parameters
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# check command line and add reynolds
if len(argv) == 3:
re = float(argv[2])
irrotational = 0
if 0 == rank:
print(f"Reynolds number = {re}")
else:
re = -1
irrotational = 1
if 0 == rank:
print("Irrotational flow\n")
# irrotational?
if not irrotational:
zet = np.zeros((m + 2, n + 2))
if rank == 0:
sys.stdout.write("\n2D CFD Simulation\n")
sys.stdout.write("=================\n")
sys.stdout.write("Scale factor = {0}\n".format(scalefactor))
sys.stdout.write("Iterations = {0}\n".format(niter))
    # calculate local size
lm = int(m/size)
# bnorm
bnorm = np.array([0.0])
# consistency check
if size*lm != m:
if 0 == rank:
            print(f'Error: {m} does not divide into {size} processes')
        MPI.Finalize()
        sys.exit(1)
if 0 == rank:
print(f'Running CFD on {m}x{n} grid using {size} processes')
# Write the simulation details
sys.stdout.write("\nGrid size = {0} x {1}\n".format(m, n))
# didn't need it
#print('before', scalefactor, niter, re, irrotational)
#broadcast runtime params to other processors
#comm.bcast(scalefactor, root=0) # MPI_Bcast(&scalefactor,1,MPI_INT,0,comm);
#comm.bcast(niter, root=0) # MPI_Bcast(&numiter,1,MPI_INT,0,comm);
#comm.bcast(re, root=0) # MPI_Bcast(&re,1,MPI_DOUBLE,0,comm);
#comm.bcast(irrotational, root=0) # MPI_Bcast(&irrotational,1,MPI_INT,0,comm);
#print('after bcast', scalefactor, niter, re, irrotational)
# reynolds number
re = re / scalefactor
    # do we stop because of tolerance?
if tolerance > 0:
checkerr = 1
# Define the psi array of dimension [m+2][n+2] and set it to zero
psi = np.zeros((lm + 2, n + 2))
# Set the psi boundary conditions
boundarypsi(psi, lm, n, b, h, w, comm)
# compute normalisation factor for error
localbnorm = 0
# better than double for-loop:
    localbnorm += np.sum(psi * psi)  # vectorised sum, replacing the original double for-loop
# boundary swap of psi
haloSWAP(psi, lm, n, comm)
if not irrotational:
# update zeta BCs that depends on psi
boundaryzet(zet, psi, lm, n, comm)
# update normalisation
localbnorm += np.sum(zet * zet)
# boundary swap of psi
haloSWAP(zet, lm, n, comm)
comm.Allreduce(sendbuf=localbnorm, recvbuf=bnorm, op=MPI.SUM)
bnorm = np.sqrt(bnorm)
# Call the Jacobi iterative loop (and calculate timings)
if 0 == rank:
sys.stdout.write("\nStarting main Jacobi loop ...\n\n")
#barrier for accurate timing - not needed for correctness
comm.Barrier()
tstart = MPI.Wtime()
# -------------------
for iter in range(1, niter + 1):
        # calculate psi for next iteration
if irrotational:
psitmp = jacobistep(psi, lm, n)
else:
psitmp, zettmp = jacobistepvort(zet, psi, lm, n, re)
        # calculate current error if required
if checkerr or iter == niter:
localerror = deltasq(psitmp, psi, lm, n)
if not irrotational:
localerror += deltasq(zettmp, zet, lm, n)
            # use allreduce so every rank sees the error and can stop together
            error = comm.allreduce(localerror, op=MPI.SUM)
            error = np.sqrt(error) / bnorm
        # copy back the new interior values only (halo rows are handled separately)
psi[1:lm+1, 1:n+1] = psitmp
if not irrotational:
            # copy back the new interior values only
zet[1:lm+1, 1:n+1] = zettmp
# do a boundary swap
haloSWAP(psi, lm, n, comm)
if not irrotational:
haloSWAP(zet, lm, n, comm)
# update zeta BCs that depend on psi
boundaryzet(zet, psi, lm, n, comm)
        # quit early if we have reached the required tolerance
        if checkerr and error < tolerance:
            if 0 == rank:
                print(f"Converged on iteration {iter}")
            break
        # print loop information
if (iter % printfreq == 0) and 0 == rank:
if not checkerr:
print(f"Completed iteration {iter}")
else:
print(f"Completed iteration {iter}, error = {error}\n")
if iter > niter:
iter = niter
# -------------------
#barrier for accurate timing - not needed for correctness
comm.Barrier()
tend = MPI.Wtime()
ttot = tend - tstart
titer = ttot / niter
# print out some stats
if 0 == rank:
print("\n... finished\n")
print(f"After {iter} iterations, the error is {error}\n")
print(f"Time for {iter} iterations was {ttot} seconds\n")
print(f"Each iteration took {titer} seconds\n")
# Write the output files for subsequent visualisation
#write_data(m, n, scalefactor, psi, "velocity.dat", "colourmap.dat", comm)
# generate gnuplot file
# Finish nicely
    # make sure every rank reaches Finalize before exiting
    if 0 == rank:
        # writeplotfile(m, n, scalefactor)
        pass
    MPI.Finalize()
##############################################################
# Function to create tidy way to have main method
if __name__ == "__main__":
main(sys.argv[1:])
##############################################################
|
[] |
[] |
[
"NUMEXPR_MAX_THREADS"
] |
[]
|
["NUMEXPR_MAX_THREADS"]
|
python
| 1 | 0 | |
vendor/github.com/kubernetes-incubator/service-catalog/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go
|
package adal
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"reflect"
"runtime"
"strings"
"testing"
)
const MockTokenJSON string = `{
"access_token": "accessToken",
"refresh_token": "refreshToken",
"expires_in": "1000",
"expires_on": "2000",
"not_before": "3000",
"resource": "resource",
"token_type": "type"
}`
var TestToken = Token{
AccessToken: "accessToken",
RefreshToken: "refreshToken",
ExpiresIn: "1000",
ExpiresOn: "2000",
NotBefore: "3000",
Resource: "resource",
Type: "type",
}
func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File {
f, err := ioutil.TempFile(os.TempDir(), suffix)
if err != nil {
t.Fatalf("azure: unexpected error when creating temp file: %v", err)
}
defer f.Close()
_, err = f.Write([]byte(contents))
if err != nil {
t.Fatalf("azure: unexpected error when writing temp test file: %v", err)
}
return f
}
func TestLoadToken(t *testing.T) {
f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON)
defer os.Remove(f.Name())
expectedToken := TestToken
actualToken, err := LoadToken(f.Name())
if err != nil {
t.Fatalf("azure: unexpected error loading token from file: %v", err)
}
if *actualToken != expectedToken {
t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken)
}
// test that LoadToken closes the file properly
err = SaveToken(f.Name(), 0600, *actualToken)
if err != nil {
t.Fatalf("azure: could not save token after LoadToken: %v", err)
}
}
func TestLoadTokenFailsBadPath(t *testing.T) {
_, err := LoadToken("/tmp/this_file_should_never_exist_really")
expectedSubstring := "failed to open file"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error())
}
}
func TestLoadTokenFailsBadJson(t *testing.T) {
gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1)
f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON)
defer os.Remove(f.Name())
_, err := LoadToken(f.Name())
expectedSubstring := "failed to decode contents of file"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error())
}
}
func token() *Token {
var token Token
json.Unmarshal([]byte(MockTokenJSON), &token)
return &token
}
func TestSaveToken(t *testing.T) {
f, err := ioutil.TempFile("", "testloadtoken")
if err != nil {
t.Fatalf("azure: unexpected error when creating temp file: %v", err)
}
defer os.Remove(f.Name())
f.Close()
mode := os.ModePerm & 0642
err = SaveToken(f.Name(), mode, *token())
if err != nil {
t.Fatalf("azure: unexpected error saving token to file: %v", err)
}
fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh
if err != nil {
t.Fatalf("azure: stat failed: %v", err)
}
if runtime.GOOS != "windows" { // permissions don't work on Windows
if perm := fi.Mode().Perm(); perm != mode {
t.Fatalf("azure: wrong file perm. got:%s; expected:%s file :%s", perm, mode, f.Name())
}
}
var actualToken Token
var expectedToken Token
	json.Unmarshal([]byte(MockTokenJSON), &expectedToken)
	contents, err := ioutil.ReadFile(f.Name())
	if err != nil {
		t.Fatalf("azure: unexpected error reading token file: %v", err)
	}
	json.Unmarshal(contents, &actualToken)
if !reflect.DeepEqual(actualToken, expectedToken) {
t.Fatal("azure: token was not serialized correctly")
}
}
func TestSaveTokenFailsNoPermission(t *testing.T) {
pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall"
if runtime.GOOS == "windows" {
pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken")
}
err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token())
expectedSubstring := "failed to create directory"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
}
}
func TestSaveTokenFailsCantCreate(t *testing.T) {
tokenPath := "/thiswontwork"
if runtime.GOOS == "windows" {
tokenPath = path.Join(os.Getenv("windir"), "system32")
}
err := SaveToken(tokenPath, 0644, *token())
expectedSubstring := "failed to create the temp file to write the token"
if err == nil || !strings.Contains(err.Error(), expectedSubstring) {
t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err)
}
}
|
[
"\"windir\"",
"\"windir\""
] |
[] |
[
"windir"
] |
[]
|
["windir"]
|
go
| 1 | 0 | |
queue.py
|
#!/usr/bin/env python
import os
import traceback
import sys
import argparse
import subprocess
import re
import fnmatch
import urllib
import json
if sys.version_info >= (3, 0):
import urllib
from urllib.parse import urlencode
from urllib.request import Request, urlopen
else:
from urllib import urlencode
import urllib2
from urllib2 import Request, urlopen
env = os.environ
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--token", help="Token to authenticate (not needed for public projects on appveyor, travis and circle-ci")
parser.add_argument("-n", "--name", help="Custom name for the text run")
parser.add_argument("-i", "--title", help="Custom output title")
parser.add_argument("-r", "--root_dir", help="The root directory of the git-project, to be used for aligning paths properly. Default is the git-root.")
parser.add_argument("-s", "--sha", help="Specify the commit sha - normally determined by invoking git")
parser.add_argument("-u", "--slug", help="Slug of the reporistory, e.g. report-ci/scripts")
parser.add_argument("-x", "--text", help="Text for the placeholder")
parser.add_argument("-d", "--id_file" , help="The file to hold the check id given by github.", default=".report-ci-id.json")
args = parser.parse_args()
if "REPORT_CI_TOKEN" in env and not args.token:
args.token = env["REPORT_CI_TOKEN"]
commit = None
if args.sha:
commit = args.sha
if not commit:
commit = subprocess.check_output(["git" ,"rev-parse", "HEAD"]).decode().strip()
print(bcolors.OKBLUE + ' CommitHash: ' + commit + bcolors.ENDC)
root_dir = args.root_dir
if not root_dir:
root_dir = subprocess.check_output(["git" ,"rev-parse", "--show-toplevel"]).decode().replace('\n', '')
print(bcolors.OKBLUE + ' RootDir: ' + root_dir + bcolors.ENDC)
owner, repo = None, None
if args.slug:
    try:
        (owner, repo) = args.slug.split('/')
    except ValueError:
        print(bcolors.WARNING + ' InvalidSlug: {0}'.format(args.slug) + bcolors.ENDC)
        exit(1)
if not owner or not repo:
remote_v = subprocess.check_output(["git" ,"remote", "-v"]).decode()
match = re.search(r"(?:https://|ssh://git@)github.com/([-_A-Za-z0-9]+)/((?:(?!\.git(?:\s|$))[-._A-Za-z0-9])+)", remote_v)
if match:
owner = match.group(1)
repo = match.group(2)
else:
match = re.search(r"git@github\.com:([-_A-Za-z0-9]+)/((?:(?!\.git(?:\s|$))[-._A-Za-z0-9])+)", remote_v)
owner = match.group(1)
repo = match.group(2)
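# For example, for a remote of "https://github.com/report-ci/scripts.git" the
# first pattern above yields owner "report-ci" and repo "scripts" (the lookahead
# drops the ".git" suffix); the second pattern covers the
# "git@github.com:owner/repo" SSH form.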
print(bcolors.OKBLUE + ' Project: ' + owner + '/' + repo + bcolors.ENDC)
query = {
'owner': owner,
'repo': repo,
'head-sha': commit,
'root-dir': root_dir
}
if args.name:
query['run-name'] = args.name
if args.title:
query['title'] = args.title
url = "https://api.report.ci/queue"
if sys.version_info >= (3, 0):
url = urllib.request.urlopen(url).geturl()
else:
url = urllib.urlopen(url).geturl()
upload_content = ""
if args.text:
upload_content = open(args.text, "r").read()
uc = bytes(upload_content, "utf8") if sys.version_info >= (3, 0) else upload_content
request = Request(url + "?" + urlencode(query), uc)
request.add_header("Authorization", "Bearer " + args.token)
request.add_header("Content-Type", "text/plain")
request.get_method = lambda: 'POST'
try:
response = urlopen(request).read().decode()
res = json.loads(response)
ch_id = str(res["github"])
print ('Queued check_run https://github.com/{}/{}/runs/{}'.format(owner, repo, ch_id))
open(args.id_file, 'w').write(response)
exit(0)
except Exception as e:
sys.stderr.write("Exception: " + str(e))
print(bcolors.FAIL + 'Queueing failed: {0}'.format(e) + bcolors.ENDC)
print(e.read())
exit(1)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
operator/common.go
|
package operator
/*
Copyright 2019 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"bytes"
"os"
"strings"
crv1 "github.com/crunchydata/postgres-operator/apis/cr/v1"
"github.com/crunchydata/postgres-operator/config"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
)
var CRUNCHY_DEBUG bool
var NAMESPACE string
var InstallationName string
var PgoNamespace string
var EventTCPAddress = "localhost:4150"
var Pgo config.PgoConfig
type containerResourcesTemplateFields struct {
RequestsMemory, RequestsCPU string
LimitsMemory, LimitsCPU string
}
func Initialize(clientset *kubernetes.Clientset) {
tmp := os.Getenv("CRUNCHY_DEBUG")
if tmp == "true" {
CRUNCHY_DEBUG = true
log.Debug("CRUNCHY_DEBUG flag set to true")
} else {
CRUNCHY_DEBUG = false
log.Info("CRUNCHY_DEBUG flag set to false")
}
NAMESPACE = os.Getenv("NAMESPACE")
log.Infof("NAMESPACE %s", NAMESPACE)
if NAMESPACE == "" {
log.Error("NAMESPACE env var is set to empty string which pgo intprets as meaning you want it to watch 'all' namespaces.")
}
InstallationName = os.Getenv("PGO_INSTALLATION_NAME")
log.Infof("InstallationName %s", InstallationName)
if InstallationName == "" {
log.Error("PGO_INSTALLATION_NAME env var is required")
os.Exit(2)
}
PgoNamespace = os.Getenv("PGO_OPERATOR_NAMESPACE")
if PgoNamespace == "" {
log.Error("PGO_OPERATOR_NAMESPACE environment variable is not set and is required, this is the namespace that the Operator is to run within.")
os.Exit(2)
}
var err error
err = Pgo.GetConfig(clientset, PgoNamespace)
if err != nil {
log.Error(err)
log.Error("pgo-config files and templates did not load")
os.Exit(2)
}
log.Printf("PrimaryStorage=%v\n", Pgo.Storage["storage1"])
if Pgo.Cluster.CCPImagePrefix == "" {
log.Debug("pgo.yaml CCPImagePrefix not set, using default")
Pgo.Cluster.CCPImagePrefix = "crunchydata"
} else {
log.Debugf("pgo.yaml CCPImagePrefix set, using %s", Pgo.Cluster.CCPImagePrefix)
}
if Pgo.Pgo.PGOImagePrefix == "" {
log.Debug("pgo.yaml PGOImagePrefix not set, using default")
Pgo.Pgo.PGOImagePrefix = "crunchydata"
} else {
log.Debugf("PGOImagePrefix set, using %s", Pgo.Pgo.PGOImagePrefix)
}
if Pgo.Cluster.PgmonitorPassword == "" {
log.Debug("pgo.yaml PgmonitorPassword not set, using default")
Pgo.Cluster.PgmonitorPassword = "password"
}
if Pgo.Pgo.PGOImageTag == "" {
log.Error("pgo.yaml PGOImageTag not set, required ")
os.Exit(2)
}
tmp = os.Getenv("EVENT_TCP_ADDRESS")
if tmp != "" {
EventTCPAddress = tmp
}
log.Info("EventTCPAddress set to " + EventTCPAddress)
}
// GetContainerResourcesJSON returns the container resources JSON built from
// the given resource spec, or an empty string if the spec is incomplete or the
// template fails to execute
func GetContainerResourcesJSON(resources *crv1.PgContainerResources) string {
//test for the case where no container resources are specified
if resources.RequestsMemory == "" || resources.RequestsCPU == "" ||
resources.LimitsMemory == "" || resources.LimitsCPU == "" {
return ""
}
fields := containerResourcesTemplateFields{}
fields.RequestsMemory = resources.RequestsMemory
fields.RequestsCPU = resources.RequestsCPU
fields.LimitsMemory = resources.LimitsMemory
fields.LimitsCPU = resources.LimitsCPU
doc := bytes.Buffer{}
err := config.ContainerResourcesTemplate.Execute(&doc, fields)
if err != nil {
log.Error(err.Error())
return ""
}
if log.GetLevel() == log.DebugLevel {
config.ContainerResourcesTemplate.Execute(os.Stdout, fields)
}
return doc.String()
}
// GetRepoType returns the proper repo type to set in container based on the
// backrest storage type provided
func GetRepoType(backrestStorageType string) string {
if backrestStorageType != "" && backrestStorageType == "s3" {
return "s3"
} else {
return "posix"
}
}
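// For illustration: GetRepoType("s3") returns "s3", while GetRepoType("") and
// GetRepoType("local") both return "posix".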
// IsLocalAndS3Storage returns a boolean indicating whether both local and s3 storage
// should be enabled for pgBackRest based on the backrestStorageType string provided
func IsLocalAndS3Storage(backrestStorageType string) bool {
if backrestStorageType != "" && strings.Contains(backrestStorageType, "s3") &&
strings.Contains(backrestStorageType, "local") {
return true
}
return false
}
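// For illustration: IsLocalAndS3Storage("local,s3") returns true, while
// IsLocalAndS3Storage("s3") and IsLocalAndS3Storage("local") both return false.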
|
[
"\"CRUNCHY_DEBUG\"",
"\"NAMESPACE\"",
"\"PGO_INSTALLATION_NAME\"",
"\"PGO_OPERATOR_NAMESPACE\"",
"\"EVENT_TCP_ADDRESS\""
] |
[] |
[
"CRUNCHY_DEBUG",
"NAMESPACE",
"PGO_OPERATOR_NAMESPACE",
"EVENT_TCP_ADDRESS",
"PGO_INSTALLATION_NAME"
] |
[]
|
["CRUNCHY_DEBUG", "NAMESPACE", "PGO_OPERATOR_NAMESPACE", "EVENT_TCP_ADDRESS", "PGO_INSTALLATION_NAME"]
|
go
| 5 | 0 | |
quantization/ImageNet/semiBBattack.py
|
import numpy as np
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ['TF_DETERMINISTIC_OPS'] = '1'
import tensorflow as tf
from tensorflow.python.framework.ops import enable_eager_execution
enable_eager_execution()
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import tensorflow_model_optimization as tfmot
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras import backend as K
import time
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
# Quantization spec for Batchnormalization layer
class DefaultBNQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
def get_weights_and_quantizers(self, layer):
return []
def get_activations_and_quantizers(self, layer):
return []
def set_quantize_weights(self, layer, quantize_weights):
pass
def set_quantize_activations(self, layer, quantize_activations):
pass
def get_output_quantizers(self, layer):
return [tfmot.quantization.keras.quantizers.MovingAverageQuantizer(num_bits=8, per_axis=False, symmetric=False, narrow_range=False)]
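    # i.e. only the layer's output activations are quantized (8-bit, per-tensor,
    # asymmetric); the batch-norm weights themselves are deliberately left alone.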
def get_config(self):
return {}
# Quantization spec (null) for concat layer
class NoOpQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
"""Use this config object if the layer has nothing to be quantized for
quantization aware training."""
def get_weights_and_quantizers(self, layer):
return []
def get_activations_and_quantizers(self, layer):
return []
def set_quantize_weights(self, layer, quantize_weights):
pass
def set_quantize_activations(self, layer, quantize_activations):
pass
def get_output_quantizers(self, layer):
# Does not quantize output, since we return an empty list.
return []
def get_config(self):
return {}
# Quantization spec func for DenseNet
def apply_quantization(layer):
if 'bn' in layer.name:
return tfmot.quantization.keras.quantize_annotate_layer(layer,DefaultBNQuantizeConfig())
elif 'concat' in layer.name:
return tfmot.quantization.keras.quantize_annotate_layer(layer,NoOpQuantizeConfig())
else:
return tfmot.quantization.keras.quantize_annotate_layer(layer)
# hyper-parameters
BATCH_SIZE = 50
c = 1
grad_iterations = 20
step = 1
epsilon = 8
mode = 'm' # 'm' for MobileNet, 'r' for ResNet, 'd' for DenseNet
img_rows, img_cols, num_channel = 224, 224, 3 # input image dimensions
#Load Dataset
es = {'file_name': tf.TensorSpec(shape=(), dtype=tf.string, name=None),
'image': tf.TensorSpec(shape=(img_rows, img_cols, num_channel), dtype=tf.float32, name=None),
'label': tf.TensorSpec(shape=(), dtype=tf.int64, name=None)}
mydataset = tf.data.experimental.load("../../datasets/ImageNet/quantization/3kImages/",es).batch(BATCH_SIZE).prefetch(1)
# Construct models
if mode == 'm':
model_ = tf.keras.applications.MobileNet(input_shape= (img_rows, img_cols,3))
q_model = tfmot.quantization.keras.quantize_model(model_)
model = tf.keras.applications.MobileNet(input_shape= (img_rows, img_cols,3))
d_model = tf.keras.applications.MobileNet(input_tensor = q_model.input)
model.load_weights("../../weights/fp_model_40_mobilenet.h5")# load model weight
q_model.load_weights("../../weights/q_model_40_mobilenet.h5")
d_model.load_weights("../../weights/distilled_fp_model_40_mobilenet.h5")
model.trainable = False
q_model.trainable = False
d_model.trainable = False
preprocess = tf.keras.applications.mobilenet.preprocess_input
decode = tf.keras.applications.mobilenet.decode_predictions
net = 'mobile'
elif mode == 'r':
model_ = ResNet50(input_shape= (img_rows, img_cols,3))
q_model = tfmot.quantization.keras.quantize_model(model_)
model = ResNet50(input_shape= (img_rows, img_cols,3))
d_model = ResNet50(input_tensor = q_model.input)
model.load_weights("../../weights/fp_model_40_resnet50.h5")# load model weight
q_model.load_weights("../../weights/q_model_40_resnet50.h5")
d_model.load_weights("../../weights/distilled_fp_model_40_resnet50.h5")
model.trainable = False
q_model.trainable = False
d_model.trainable = False
preprocess = tf.keras.applications.resnet.preprocess_input
decode = tf.keras.applications.resnet.decode_predictions
net = 'res'
else:
model_ = tf.keras.applications.DenseNet121(input_shape=(img_rows, img_cols,3))
# Create a base model
base_model = model_
# Helper function uses `quantize_annotate_layer` to annotate that only the
# Dense layers should be quantized.
LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer
# Use `tf.keras.models.clone_model` to apply `apply_quantization_to_dense`
# to the layers of the model.
annotated_model = tf.keras.models.clone_model(
base_model,
clone_function=apply_quantization,
)
with tfmot.quantization.keras.quantize_scope({'DefaultBNQuantizeConfig': DefaultBNQuantizeConfig, 'NoOpQuantizeConfig': NoOpQuantizeConfig}):
q_model = tfmot.quantization.keras.quantize_apply(annotated_model)
model = tf.keras.applications.DenseNet121(input_shape= (img_rows, img_cols,3))
d_model = tf.keras.applications.DenseNet121(input_tensor = q_model.input)
model.load_weights("../../weights/fp_model_40_densenet121.h5")# load model weight
q_model.load_weights("../../weights/q_model_40_densenet121.h5")
d_model.load_weights("../../weights/distilled_fp_model_40_densenet121.h5")
model.trainable = False
q_model.trainable = False
d_model.trainable = False
preprocess = tf.keras.applications.densenet.preprocess_input
decode = tf.keras.applications.densenet.decode_predictions
net = 'dense'
# DIVA attack for top-1
def second(image,label):
orig_img = tf.identity(image)
input_image = tf.identity(image)
    # Compute clean predictions and acquire labels
orig_logist = tf.identity(model.predict(preprocess(input_image)[None,...]) )
orig_label = np.argmax(orig_logist[0])
quant_logist = tf.identity(q_model.predict(preprocess(input_image)[None,...]))
quant_label = np.argmax(quant_logist[0])
d_logist = tf.identity(d_model.predict(preprocess(input_image)[None,...]))
d_label = np.argmax(d_logist[0])
# Check for unqualified input
if orig_label != quant_label or orig_label != d_label:
print(orig_label)
return -2,-2,-2,-2,-2
if orig_label != label:
return -3,-3,-3,-3,-3
# Initialize attack to 0
A = 0
start_time = time.time()
for iters in range(0,grad_iterations):
# Compute loss
with tf.GradientTape() as g:
g.watch(input_image)
loss1 = K.mean(d_model(preprocess(input_image)[None,...], training = False)[..., orig_label])
loss2 = K.mean(q_model(preprocess(input_image)[None,...], training = False)[..., orig_label])
final_loss = K.mean(loss1 - c*loss2)
# Compute attack
grads = normalize(g.gradient(final_loss, input_image))
adv_image = input_image + tf.sign(grads) * step
A = tf.clip_by_value(adv_image - orig_img, -epsilon, epsilon)
input_image = tf.clip_by_value(orig_img + A, 0, 255)
test_image = preprocess(input_image)[None,...]
# Compute new predictions
pred1, pred2= d_model.predict(test_image), q_model.predict(test_image)
label1, label2 = np.argmax(pred1[0]), np.argmax(pred2[0])
pred3 = model.predict(test_image)
label3 = np.argmax(pred3[0])
if not label1 == label2:
if label1 == orig_label and decode(pred1, top=1)[0][0][2] > 0.6:
# If successfully fool the quantized model but not the distilled fp model
# also the conf score is higher than 0.6
# time to generate the successful attack
total_time = time.time() - start_time
                gen_img_deprocessed = input_image  # adversarial image
                orig_img_deprocessed = orig_img  # original image
                A = (gen_img_deprocessed - orig_img_deprocessed).numpy()  # attack
                # Since the final goal of the attack is to stay undetected by the original model,
                # it is still a failure if the original model mispredicted the label
if label3 != orig_label:
return -1, -1, -1, gen_img_deprocessed, A
norm = np.max(np.abs(A)) # adversarial distance
return total_time, norm, iters, gen_img_deprocessed, A
gen_img_deprocessed = input_image # generated non-adversarial image
orig_img_deprocessed = orig_img # original image
A = (gen_img_deprocessed - orig_img_deprocessed).numpy() # differences
return -1, -1, -1, gen_img_deprocessed, A
# Top-k evaluation
def topk(model_pred, qmodel_pred, k):
preds = decode(model_pred, top=k)
qpreds = decode(qmodel_pred, top=1)[0][0][1]
for pred in preds[0]:
if pred[1] == qpreds:
return True
return False
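# For example, topk(fp_pred, q_pred, 5) is True when the quantized model's
# top-1 label appears among the full-precision model's top-5 predictions,
# i.e. the two models still "agree" under the looser top-k criterion.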
# DIVA attack for top-k
def secondk(image,k):
orig_img = tf.identity(image)
input_image = tf.identity(image)
    # Compute clean predictions and acquire labels
orig_logist = tf.identity(model.predict(preprocess(input_image)[None,...]) )
orig_label = np.argmax(orig_logist[0])
quant_logist = tf.identity(q_model.predict(preprocess(input_image)[None,...]))
quant_label = np.argmax(quant_logist[0])
d_logist = tf.identity(d_model.predict(preprocess(input_image)[None,...]))
d_label = np.argmax(d_logist[0])
# Check for unqualified input
if orig_label != quant_label or orig_label != d_label:
return -2,-2,-2,-2,-2
# Initialize attack to 0
A = 0
start_time = time.time()
for iters in range(0,grad_iterations):
# Compute loss
with tf.GradientTape() as g:
g.watch(input_image)
loss1 = K.mean(d_model(preprocess(input_image)[None,...], training = False)[..., orig_label])
loss2 = K.mean(q_model(preprocess(input_image)[None,...], training = False)[..., orig_label])
final_loss = K.mean(loss1 - c*loss2)
# Compute attack
grads = normalize(g.gradient(final_loss, input_image))
adv_image = input_image + tf.sign(grads) * step
A = tf.clip_by_value(adv_image - orig_img, -epsilon, epsilon)
input_image = tf.clip_by_value(orig_img + A, 0, 255)
test_image = preprocess(input_image)[None,...]
# Compute new predictions
pred1, pred2= d_model.predict(test_image), q_model.predict(test_image)
label1, label2 = np.argmax(pred1[0]), np.argmax(pred2[0])
pred3 = model.predict(test_image)
label3 = np.argmax(pred3[0])
if not topk(pred1, pred2, k):
if label1 == orig_label and decode(pred1, top=1)[0][0][2] > 0.6:
# If successfully fool the quantized model but not the distilled fp model
# also the conf score is higher than 0.6
# time to generate the successful attack
total_time = time.time() - start_time
                gen_img_deprocessed = input_image  # adversarial image
                orig_img_deprocessed = orig_img  # original image
                A = (gen_img_deprocessed - orig_img_deprocessed).numpy()  # attack
                # Since the final goal of the attack is to stay undetected by the original model,
                # it is still a failure if the original model mispredicted the label
if label3 == orig_label and not topk(pred3, pred2, k):
norm = np.max(np.abs(A))# adversarial distance
return total_time, norm, iters, gen_img_deprocessed, A
else:
return -1, -1, -1, gen_img_deprocessed, A
    gen_img_deprocessed = input_image  # generated non-adversarial image
    orig_img_deprocessed = orig_img  # original image
    A = (gen_img_deprocessed - orig_img_deprocessed).numpy()  # differences
return -1, -1, -1, gen_img_deprocessed, A
def calc_normal_success(method, methodk, ds, folderName='', filterName='',dataName='',dataFolder='',locald = ''):
total=0 # number of images seen
badimg = 0 # number of unqualified images
count=0 # number of successful top-1 attack
top5 = 0 # number of successful top-5 attack
timeStore = [] # time to generate the top-1 attack
advdistStore = [] # adversarial distance for the top-1 attack
stepsStore = [] # steps took to generate the top-1 attack
    timeStorek = []  # time to generate the top-k (k=5) attack
    advdistStorek = []  # adversarial distance for the top-k attack
    stepsStorek = []  # steps took to generate the top-k attack
failure = 0 # number of failed attack
for i, features in enumerate(ds):
images = features['image']
labels = features['label']
for j,image in enumerate(images):
label = labels[j].numpy()
            # attempt the top-1 attack
time, advdist, steps, gen, A = method(image,label)
total += 1
# if attack failed
if time == -1:
print("Didnt find anything")
# np.save(locald + 'failure/' + folderName+"/"+dataName+str(failure)+"@"+str(total)+".npy", gen)
# np.save(locald + 'failure/' + filterName+"/"+dataName+str(failure)+"@"+str(total)+".npy", A)
failure +=1
continue
# if its a bad image
if time == -2:
badimg += 1
total -= 1
failure +=1
print("Bad Image",badimg)
continue
# if its an incorrect image
if time == -3:
badimg += 1
total -= 1
failure +=1
print("Incorrect Image",badimg)
continue
            count += 1 # top-1 attack succeeded
# np.save(locald+folderName+"/"+dataName+str(count)+"@"+str(total)+".npy", gen)
# np.save(locald+filterName+"/"+dataName+str(count)+"@"+str(total)+".npy", A)
print("Number seen:",total)
print("No. worked:", count)
print("No. topk:", top5)
print("Bad Image:", badimg)
timeStore.append(time)
advdistStore.append(advdist)
stepsStore.append(steps)
# with open(locald+dataFolder+"/"+dataName+'_time_data.csv', 'a') as f:
# f.write(str(time) + ", ")
# with open(locald+dataFolder+"/"+dataName+'_advdist_data.csv', 'a') as f:
# f.write(str(advdist) + ", ")
# with open(locald+dataFolder+"/"+dataName+'_steps_data.csv', 'a') as f:
# f.write(str(steps) + ", ")
            # attempt the top-5 attack
print("starting k search")
time, advdist, steps, gen, A = methodk(image,5)
# if attack failed
if time == -1:
print("Didnt find anything in K")
#np.save(locald + 'failure/' + folderName+"/"+dataName+"k"+str(failure)+".npy", gen)
#np.save(locald + 'failure/' + filterName+"/"+ dataName+"k"+str(failure)+".npy", A)
continue
# if its a bad image
if time == -2:
print("Bad Image in K",badimg)
continue
top5 += 1
#np.save(locald+folderName+"/"+dataName+"k"+str(count)+".npy", gen)
#np.save(locald+filterName+"/"+dataName+"k"+str(count)+".npy", A)
timeStorek.append(time)
advdistStorek.append(advdist)
stepsStorek.append(steps)
#with open(locald+dataFolder+"/"+dataName+'_timek_data.csv', 'a') as f:
#f.write(str(time) + ", ")
#with open(locald+dataFolder+"/"+dataName+'_advdistk_data.csv', 'a') as f:
#f.write(str(advdist) + ", ")
#with open(locald+dataFolder+"/"+dataName+'_stepsk_data.csv', 'a') as f:
#f.write(str(steps) + ", ")
print("Number seen:",total)
print("No. worked:", count)
print("No. topk:", top5)
print("Bad Image:", badimg)
calc_normal_success(second,secondk,mydataset,
folderName=net + 'net_imagenet_images_second', filterName=net +'net_imagenet_filters_second',dataName='second', dataFolder=net +'net_imagenet_data_second', locald ='./results/SemiBB/' + net + 'net_c1/'+ net + 'net/')
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"TF_DETERMINISTIC_OPS"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "TF_DETERMINISTIC_OPS"]
|
python
| 2 | 0 | |
cmd/ecl2mond/main.go
|
package main
import (
"fmt"
"os"
"runtime"
. "github.com/nttcom/ecl2mond"
)
func main() {
defer func() {
if err := recover(); err != nil {
fmt.Fprintf(os.Stderr, "Error:%s\n", err)
os.Exit(1)
}
}()
os.Exit(_main())
}
func _main() int {
if os.Getenv("GOMAXPROCS") == "" {
runtime.GOMAXPROCS(1)
}
cli := NewEcl2mond()
if err := cli.Run(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
return 1
}
return 0
}
|
[
"\"GOMAXPROCS\""
] |
[] |
[
"GOMAXPROCS"
] |
[]
|
["GOMAXPROCS"]
|
go
| 1 | 0 | |
pkg/bindings/amqp/amqp_test.go
|
package amqp
import (
"io"
"net/url"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"pack.ag/amqp"
"github.com/cloudevents/sdk-go/pkg/binding"
"github.com/cloudevents/sdk-go/pkg/binding/test"
ce "github.com/cloudevents/sdk-go/pkg/cloudevents"
)
func TestSendReceiveBinary(t *testing.T) {
c, s, r := testSenderReceiver(t, ForceBinary())
defer c.Close()
test.EachEvent(t, test.Events(), func(t *testing.T, e ce.Event) {
eventIn := test.ExToStr(t, e)
in := binding.NewMockBinaryMessage(eventIn)
test.SendReceive(t, in, s, r, func(out binding.Message) {
eventOut, _, isBinary := test.MustToEvent(out)
assert.True(t, isBinary)
test.AssertEventEquals(t, eventIn, test.ExToStr(t, eventOut))
})
})
}
func TestSendReceiveStruct(t *testing.T) {
c, s, r := testSenderReceiver(t, ForceStructured())
defer c.Close()
test.EachEvent(t, test.Events(), func(t *testing.T, e ce.Event) {
eventIn := test.ExToStr(t, e)
in := binding.NewMockStructuredMessage(eventIn)
test.SendReceive(t, in, s, r, func(out binding.Message) {
eventOut, isStructured, _ := test.MustToEvent(out)
assert.True(t, isStructured)
test.AssertEventEquals(t, eventIn, test.ExToStr(t, eventOut))
})
})
}
func TestSendEventReceiveBinary(t *testing.T) {
c, s, r := testSenderReceiver(t, ForceBinary())
defer c.Close()
test.EachEvent(t, test.Events(), func(t *testing.T, e ce.Event) {
eventIn := test.ExToStr(t, e)
in := binding.EventMessage(eventIn)
test.SendReceive(t, in, s, r, func(out binding.Message) {
eventOut, _, isBinary := test.MustToEvent(out)
assert.True(t, isBinary)
test.AssertEventEquals(t, eventIn, test.ExToStr(t, eventOut))
})
})
}
func TestSendEventReceiveStruct(t *testing.T) {
c, s, r := testSenderReceiver(t, ForceStructured())
defer c.Close()
test.EachEvent(t, test.Events(), func(t *testing.T, e ce.Event) {
eventIn := test.ExToStr(t, e)
in := binding.EventMessage(eventIn)
test.SendReceive(t, in, s, r, func(out binding.Message) {
eventOut, isStructured, _ := test.MustToEvent(out)
assert.True(t, isStructured)
test.AssertEventEquals(t, eventIn, test.ExToStr(t, eventOut))
})
})
}
// TODO(alanconway) Need better self-test without external dependency.
// Ideally add AMQP server support to the binding.
// Some tests require an AMQP broker or router. If the connection fails
// the tests are skipped. The env variable TEST_AMQP_URL can be set to the
// test URL, otherwise the default address is "test".
//
// One option is http://qpid.apache.org/components/dispatch-router/index.html.
// It can be installed from source or from RPMs, see https://qpid.apache.org/packages.html
// Run `qdrouterd` and the tests will work with no further config.
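// For example, assuming a local qdrouterd listening on the default port:
//
//	TEST_AMQP_URL=amqp://localhost:5672/test go test ./pkg/bindings/amqp/...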
func testClient(t testing.TB) (client *amqp.Client, session *amqp.Session, addr string) {
t.Helper()
addr = "test"
s := os.Getenv("TEST_AMQP_URL")
if u, err := url.Parse(s); err == nil && u.Path != "" {
addr = u.Path
}
client, err := amqp.Dial(s)
if err != nil {
t.Skipf("ampq.Dial(%#v): %v", s, err)
}
session, err = client.NewSession()
require.NoError(t, err)
return client, session, addr
}
func testSenderReceiver(t testing.TB, senderOptions ...SenderOptionFunc) (io.Closer, binding.Sender, binding.Receiver) {
c, ss, a := testClient(t)
r, err := ss.NewReceiver(amqp.LinkSourceAddress(a))
require.NoError(t, err)
s, err := ss.NewSender(amqp.LinkTargetAddress(a))
require.NoError(t, err)
return c, NewSender(s, senderOptions...), &Receiver{r}
}
func BenchmarkSendReceive(b *testing.B) {
c, s, r := testSenderReceiver(b)
defer func() { require.NoError(b, c.Close()) }()
test.BenchmarkSendReceive(b, s, r)
}
|
[
"\"TEST_AMQP_URL\""
] |
[] |
[
"TEST_AMQP_URL"
] |
[]
|
["TEST_AMQP_URL"]
|
go
| 1 | 0 | |
st2tests/st2tests/resources/packs/pythonactions/actions/python_paths.py
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
from st2common.runners.base_action import Action
class PythonPathsAction(Action):
def run(self):
print("sys.path: %s" % (sys.path))
print("PYTHONPATH: %s" % (os.environ.get("PYTHONPATH")))
|
[] |
[] |
[
"PYTHONPATH"
] |
[]
|
["PYTHONPATH"]
|
python
| 1 | 0 | |
services/logs/app/logger.go
|
package app
import (
"fmt"
"io"
"os"
"path/filepath"
"time"
)
type Logger interface {
Access(timestamp int64, serverName, method, requestId, user, duration string)
Error(timestamp int64, serverName string, requestId string, data string)
System(timestamp int64, serverName string, data string)
}
type LoggerData struct {
oldName string
writer io.WriteCloser
Path string
}
func InitLogger(path string) Logger {
return &LoggerData{
Path: path,
}
}
func (l *LoggerData) getFileName() string {
y, m, d := time.Now().Date()
return filepath.Join(l.Path, fmt.Sprintf("%v.%v.%v.log", d, m, y))
}
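// Note that %v renders the time.Month by name, so on 2 January 2006 this
// returns "<path>/2.January.2006.log".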
func (l *LoggerData) setNewWriter() {
if l.writer != nil {
_ = l.writer.Close()
}
l.oldName = l.getFileName()
l.writer, _ = os.OpenFile(l.oldName, os.O_APPEND|os.O_CREATE|os.O_RDWR, os.ModePerm)
}
var accessTemplate = "%8s %22s | %12s | %28s | %12s | %v (%v)\n"
func (l *LoggerData) Access(timestamp int64, serverName, method, requestId, user, duration string) {
if l.getFileName() != l.oldName {
l.setNewWriter()
}
if os.Getenv("APP_MODE") == "debug" {
		fmt.Printf(accessTemplate,
			"(ACCESS)",
			timestampToDate(timestamp),
			requestId,
			user,
			duration,
			serverName,
			method)
}
	_, _ = l.writer.Write([]byte(fmt.Sprintf(accessTemplate,
		"(ACCESS)",
		timestampToDate(timestamp),
		requestId,
		user,
		duration,
		serverName,
		method)))
}
var errorTemplate = "%8s %22s | %12s | %28s | %v\n"
func (l *LoggerData) Error(timestamp int64, serverName string, requestId string, data string) {
if l.getFileName() != l.oldName {
l.setNewWriter()
}
if os.Getenv("APP_MODE") == "debug" {
		fmt.Printf(errorTemplate, "(ERROR)", timestampToDate(timestamp), requestId, serverName, data)
}
_, _ = l.writer.Write([]byte(
fmt.Sprintf(
errorTemplate, "(ERROR)", timestampToDate(timestamp), requestId, serverName, data,
)))
}
var systemTemplate = "%8s %22s | %12s | %v\n"
func (l *LoggerData) System(timestamp int64, serverName string, data string) {
if l.getFileName() != l.oldName {
l.setNewWriter()
}
if os.Getenv("APP_MODE") == "debug" {
		fmt.Printf(systemTemplate, "(SYSTEM)", timestampToDate(timestamp), serverName, data)
}
_, _ = l.writer.Write([]byte(fmt.Sprintf(systemTemplate, "(SYSTEM)", timestampToDate(timestamp), serverName, data)))
}
func timestampToDate(in int64) string {
return time.Unix(0, in).In(time.Local).Format(time.RFC822)
}
|
[
"\"APP_MODE\"",
"\"APP_MODE\"",
"\"APP_MODE\""
] |
[] |
[
"APP_MODE"
] |
[]
|
["APP_MODE"]
|
go
| 1 | 0 | |
run.py
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import time
import logging
import tempfile
import traceback
import subprocess
from shutil import disk_usage, rmtree
from base64 import b64decode
try:
import pathlib
import importlib.util
except ImportError:
pass
class GIT(object):
@classmethod
def works(cls):
try:
return bool(subprocess.check_output('git --version', shell=True))
except:
return False
class PIP(object):
@classmethod
def run(cls, command, check_output=False):
if not cls.works():
raise RuntimeError("Could not import pip.")
try:
return PIP.run_python_m(*command.split(), check_output=check_output)
except subprocess.CalledProcessError as e:
return e.returncode
except:
traceback.print_exc()
print("Error using -m method")
@classmethod
def run_python_m(cls, *args, **kwargs):
check_output = kwargs.pop('check_output', False)
check = subprocess.check_output if check_output else subprocess.check_call
return check([sys.executable, '-m', 'pip'] + list(args))
@classmethod
def run_pip_main(cls, *args, **kwargs):
import pip
args = list(args)
check_output = kwargs.pop('check_output', False)
if check_output:
from io import StringIO
out = StringIO()
sys.stdout = out
try:
pip.main(args)
except:
traceback.print_exc()
finally:
sys.stdout = sys.__stdout__
out.seek(0)
pipdata = out.read()
out.close()
print(pipdata)
return pipdata
else:
return pip.main(args)
@classmethod
def run_install(cls, cmd, quiet=False, check_output=False):
return cls.run("install %s%s" % ('-q ' if quiet else '', cmd), check_output)
@classmethod
def run_show(cls, cmd, check_output=False):
return cls.run("show %s" % cmd, check_output)
@classmethod
def works(cls):
try:
import pip
return True
except ImportError:
return False
# noinspection PyTypeChecker
@classmethod
def get_module_version(cls, mod):
try:
out = cls.run_show(mod, check_output=True)
if isinstance(out, bytes):
out = out.decode()
datas = out.replace('\r\n', '\n').split('\n')
expectedversion = datas[3]
if expectedversion.startswith('Version: '):
return expectedversion.split()[1]
else:
return [x.split()[1] for x in datas if x.startswith("Version: ")][0]
except:
pass
@classmethod
def get_requirements(cls, file='requirements.txt'):
from pip.req import parse_requirements
return list(parse_requirements(file))
# Setup initial loggers
tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
log = logging.getLogger('launcher')
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(logging.Formatter(
fmt="[%(levelname)s] %(name)s: %(message)s"
))
sh.setLevel(logging.INFO)
log.addHandler(sh)
tfh = logging.StreamHandler(stream=tmpfile)
tfh.setFormatter(logging.Formatter(
fmt="[%(relativeCreated).9f] %(asctime)s - %(levelname)s - %(name)s: %(message)s"
))
tfh.setLevel(logging.DEBUG)
log.addHandler(tfh)
def finalize_logging():
if os.path.isfile("logs/musicbot.log"):
log.info("Moving old musicbot log")
try:
if os.path.isfile("logs/musicbot.log.last"):
os.unlink("logs/musicbot.log.last")
os.rename("logs/musicbot.log", "logs/musicbot.log.last")
except:
pass
with open("logs/musicbot.log", 'w', encoding='utf8') as f:
tmpfile.seek(0)
f.write(tmpfile.read())
tmpfile.close()
f.write('\n')
f.write(" PRE-RUN SANITY CHECKS PASSED ".center(80, '#'))
f.write('\n\n')
global tfh
log.removeHandler(tfh)
del tfh
fh = logging.FileHandler("logs/musicbot.log", mode='a')
fh.setFormatter(logging.Formatter(
fmt="[%(relativeCreated).9f] %(name)s-%(levelname)s: %(message)s"
))
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
sh.setLevel(logging.INFO)
dlog = logging.getLogger('discord')
dlh = logging.StreamHandler(stream=sys.stdout)
dlh.terminator = ''
try:
dlh.setFormatter(logging.Formatter('.'))
except ValueError:
    dlh.setFormatter(logging.Formatter('.', validate=False))  # pylint: disable=unexpected-keyword-arg
dlog.addHandler(dlh)
def bugger_off(msg="Press enter to continue . . .", code=1):
input(msg)
sys.exit(code)
# TODO: all of this
def sanity_checks(optional=True):
log.info("Starting sanity checks")
# Required
# Make sure we're on Python 3.5+
req_ensure_py3()
    # Fix Windows encoding issues
req_ensure_encoding()
# Make sure we're in a writeable env
req_ensure_env()
# Make our folders if needed
req_ensure_folders()
# For rewrite only
req_check_deps()
log.info("Required checks passed.")
# Optional
if not optional:
return
# Check disk usage
opt_check_disk_space()
log.info("Optional checks passed.")
def req_ensure_py3():
log.info("Checking for Python 3.5+")
if sys.version_info < (3, 5):
log.warning("Python 3.5+ is required. This version is %s",
sys.version.split()[0])
log.warning("Attempting to locate Python 3.5...")
pycom = None
if sys.platform.startswith('win'):
log.info('Trying "py -3.5"')
try:
subprocess.check_output('py -3.5 -c "exit()"', shell=True)
pycom = 'py -3.5'
except:
log.info('Trying "python3"')
try:
subprocess.check_output('python3 -c "exit()"', shell=True)
pycom = 'python3'
except:
pass
if pycom:
log.info("Python 3 found. Launching bot...")
pyexec(pycom, 'run.py')
# I hope ^ works
os.system('start cmd /k %s run.py' % pycom)
sys.exit(0)
else:
log.info('Trying "python3.5"')
try:
pycom = subprocess.check_output(
'python3.5 -c "exit()"'.split()).strip().decode()
except:
pass
if pycom:
log.info(
"\nPython 3 found. Re-launching bot using: %s run.py\n", pycom)
pyexec(pycom, 'run.py')
log.critical(
"Could not find Python 3.5 or higher. Please run the bot using Python 3.5")
bugger_off()
def req_check_deps():
try:
import discord
if discord.version_info.major < 1:
log.critical(
"This version of MusicBot requires a newer version of pycord. Your version is {0}. Try running update.py.".format(discord.__version__))
bugger_off()
except ImportError:
# if we can't import pycord, an error will be thrown later down the line anyway
pass
def req_ensure_encoding():
log.info("Checking console encoding")
if sys.platform.startswith('win') or sys.stdout.encoding.replace('-', '').lower() != 'utf8':
log.info("Setting console encoding to UTF-8")
import io
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding='utf8', line_buffering=True)
# only slightly evil
sys.__stdout__ = sh.stream = sys.stdout
if os.environ.get('PYCHARM_HOSTED', None) not in (None, '0'):
log.info("Enabling colors in pycharm pseudoconsole")
sys.stdout.isatty = lambda: True
def req_ensure_env():
log.info("Ensuring we're in the right environment")
# if os.environ.get('APP_ENV') != 'docker' and not os.path.isdir(b64decode('LmdpdA==').decode('utf-8')):
# log.critical(b64decode(
# 'VGhlIGJvdCB3YXNuJ3QgaW5zdGFsbGVkIHVzaW5nIEdpdC4gUmVpbnN0YWxsIHVzaW5nIGh0dHBzOi8vYml0Lmx5L2pzY211c2ljYm90ZG9jcy4=').decode('utf-8'))
# bugger_off()
try:
assert os.path.isdir('config'), 'folder "config" not found'
assert os.path.isdir('musicbot'), 'folder "musicbot" not found'
assert os.path.isfile(
'musicbot/__init__.py'), 'musicbot folder is not a Python module'
assert importlib.util.find_spec(
'musicbot'), "musicbot module is not importable"
except AssertionError as e:
log.critical("Failed environment check, %s", e)
bugger_off()
try:
os.mkdir('musicbot-test-folder')
except Exception:
log.critical("Current working directory does not seem to be writable")
log.critical("Please move the bot to a folder that is writable")
bugger_off()
finally:
rmtree('musicbot-test-folder', True)
if sys.platform.startswith('win'):
log.info("Adding local bins/ folder to path")
os.environ['PATH'] += ';' + os.path.abspath('bin/')
sys.path.append(os.path.abspath('bin/')) # might as well
def req_ensure_folders():
pathlib.Path('logs').mkdir(exist_ok=True)
pathlib.Path('data').mkdir(exist_ok=True)
def opt_check_disk_space(warnlimit_mb=200):
    if disk_usage('.').free < warnlimit_mb*1024**2:  # MB -> bytes
log.warning(
"Less than %sMB of free space remains on this device" % warnlimit_mb)
#################################################
def pyexec(pycom, *args, pycom2=None):
pycom2 = pycom2 or pycom
os.execlp(pycom, pycom2, *args)
def main():
# TODO: *actual* argparsing
if '--no-checks' not in sys.argv:
sanity_checks()
finalize_logging()
import asyncio
if sys.platform == 'win32':
loop = asyncio.ProactorEventLoop() # needed for subprocesses
asyncio.set_event_loop(loop)
tried_requirementstxt = False
tryagain = True
loops = 0
max_wait_time = 60
while tryagain:
# Maybe I need to try to import stuff first, then actually import stuff
# It'd save me a lot of pain with all that awful exception type checking
m = None
try:
from musicbot import MusicBot
m = MusicBot()
sh.terminator = ''
sh.terminator = '\n'
m.run()
except SyntaxError:
log.exception("Syntax error (this is a bug, not your fault)")
break
except ImportError:
# TODO: if error module is in pip or dpy requirements...
if not tried_requirementstxt:
tried_requirementstxt = True
log.exception("Error starting bot")
log.info("Attempting to install dependencies...")
err = PIP.run_install('--upgrade -r requirements.txt')
if err: # TODO: add the specific error check back as not to always tell users to sudo it
print()
log.critical("You may need to %s to install dependencies." %
['use sudo', 'run as admin'][sys.platform.startswith('win')])
break
else:
print()
log.info("Ok lets hope it worked")
print()
else:
log.exception("Unknown ImportError, exiting.")
break
except Exception as e:
if hasattr(e, '__module__') and e.__module__ == 'musicbot.exceptions':
if e.__class__.__name__ == 'HelpfulError':
log.info(e.message)
break
elif e.__class__.__name__ == "TerminateSignal":
break
elif e.__class__.__name__ == "RestartSignal":
loops = 0
pass
else:
log.exception("Error starting bot")
finally:
if not m or not m.init_ok:
if any(sys.exc_info()):
# How to log this without redundant messages...
traceback.print_exc()
break
asyncio.set_event_loop(asyncio.new_event_loop())
loops += 1
sleeptime = min(loops * 2, max_wait_time)
if sleeptime:
log.info("Restarting in {} seconds...".format(loops*2))
time.sleep(sleeptime)
print()
log.info("All done.")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APP_ENV",
"PYCHARM_HOSTED",
"PATH"
] |
[]
|
["APP_ENV", "PYCHARM_HOSTED", "PATH"]
|
python
| 3 | 0 | |
cmd/minishift/cmd/start_preflight.go
|
/*
Copyright (C) 2017 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"net"
"os"
"os/exec"
"strconv"
"strings"
"github.com/docker/machine/libmachine/drivers"
configCmd "github.com/minishift/minishift/cmd/minishift/cmd/config"
miniutil "github.com/minishift/minishift/pkg/minishift/util"
"github.com/minishift/minishift/pkg/util/os/atexit"
"github.com/spf13/viper"
)
const (
StorageDisk = "/mnt/sda1"
)
// preflightChecksBeforeStartingHost is executed before the startHost function.
func preflightChecksBeforeStartingHost() {
switch viper.GetString(configCmd.VmDriver.Name) {
case "xhyve":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckXHyveDriver.Name,
checkXhyveDriver,
"Checking if xhyve driver is installed",
false, configCmd.WarnCheckXHyveDriver.Name,
"See the 'Setting Up the Driver Plug-in' topic for more information")
case "kvm":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckKVMDriver.Name,
checkKvmDriver,
"Checking if KVM driver is installed",
false, configCmd.WarnCheckXHyveDriver.Name,
"See the 'Setting Up the Driver Plug-in' topic for more information")
case "hyperv":
preflightCheckSucceedsOrFails(
configCmd.SkipCheckHyperVDriver.Name,
checkHypervDriver,
"Checking if Hyper-V driver is configured",
false, configCmd.WarnCheckHyperVDriver.Name,
"Hyper-V virtual switch is not set")
}
}
// preflightChecksAfterStartingHost is executed after the startHost function.
func preflightChecksAfterStartingHost(driver drivers.Driver) {
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipInstanceIP.Name,
checkInstanceIP, driver,
"Checking for IP address",
false, configCmd.WarnInstanceIP.Name,
"Error determining IP address")
/*
// This happens too late in the preflight, as provisioning needs an IP already
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckNetworkHost.Name,
checkVMConnectivity, driver,
"Checking if VM is reachable from host",
configCmd.WarnCheckNetworkHost.Name,
"Please check our troubleshooting guide")
*/
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckNetworkPing.Name,
checkIPConnectivity, driver,
"Checking if external host is reachable from the Minishift VM",
true, configCmd.WarnCheckNetworkPing.Name,
"VM is unable to ping external host")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckNetworkHTTP.Name,
checkHttpConnectivity, driver,
"Checking HTTP connectivity from the VM",
true, configCmd.WarnCheckNetworkHTTP.Name,
"VM cannot connect to external URL with HTTP")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckStorageMount.Name,
checkStorageMounted, driver,
"Checking if persistent storage volume is mounted",
false, configCmd.WarnCheckStorageMount.Name,
"Persistent volume storage is not mounted")
preflightCheckSucceedsOrFailsWithDriver(
configCmd.SkipCheckStorageUsage.Name,
checkStorageUsage, driver,
"Checking available disk space",
false, configCmd.WarnCheckStorageUsage.Name,
"Insufficient disk space on the persistent storage volume")
}
// preflightCheckFunc returns true when check passed
type preflightCheckFunc func() bool
// preflightCheckFunc used driver to interact with the VM instance and returns
// true when check passed
type preflightCheckWithDriverFunc func(driver drivers.Driver) bool
// preflightCheckSucceedsOrFails executes a pre-flight test function and prints
// the returned status in a standardized way. If the test fails and returns a
// false, the application will exit with errorMessage to describe what the
// cause is. It takes configNameOverrideIfSkipped to allow skipping the test.
// While treatAsWarning and configNameOverrideIfWarning can be used to make the
// test to be treated as a warning instead.
func preflightCheckSucceedsOrFails(configNameOverrideIfSkipped string, execute preflightCheckFunc, message string, treatAsWarning bool, configNameOverrideIfWarning string, errorMessage string) {
fmt.Printf("-- %s ... ", message)
isConfiguredToSkip := viper.GetBool(configNameOverrideIfSkipped)
isConfiguredToWarn := viper.GetBool(configNameOverrideIfWarning)
if isConfiguredToSkip {
fmt.Println("SKIP")
return
}
if execute() {
fmt.Println("OK")
return
}
fmt.Println("FAIL")
errorMessage = fmt.Sprintf(" %s", errorMessage)
if isConfiguredToWarn || treatAsWarning {
fmt.Println(errorMessage)
} else {
atexit.ExitWithMessage(1, errorMessage)
}
}
// preflightCheckSucceedsOrFails executes a pre-flight test function which uses
// the driver to interact with the VM instance. It prints the returned status in
// a standardized way. If the test fails and returns a false, the application
// will exit with errorMessage to describe what the cause is. It takes
// configNameOverrideIfSkipped to allow skipping the test. While treatAsWarning
// and configNameOverrideIfWarning can be used to make the test to be treated as
// a warning instead.
func preflightCheckSucceedsOrFailsWithDriver(configNameOverrideIfSkipped string, execute preflightCheckWithDriverFunc, driver drivers.Driver, message string, treatAsWarning bool, configNameOverrideIfWarning string, errorMessage string) {
fmt.Printf("-- %s ... ", message)
isConfiguredToSkip := viper.GetBool(configNameOverrideIfSkipped)
isConfiguredToWarn := viper.GetBool(configNameOverrideIfWarning)
if isConfiguredToSkip {
fmt.Println("SKIP")
return
}
if execute(driver) {
fmt.Println("OK")
return
}
fmt.Println("FAIL")
errorMessage = fmt.Sprintf(" %s", errorMessage)
if isConfiguredToWarn || treatAsWarning {
fmt.Println(errorMessage)
} else {
atexit.ExitWithMessage(1, errorMessage)
}
}
// checkXhyveDriver returns true if xhyve driver is available on path and has
// the setuid-bit set
func checkXhyveDriver() bool {
path, err := exec.LookPath("docker-machine-driver-xhyve")
if err != nil {
return false
}
fi, _ := os.Stat(path)
// follow symlinks
if fi.Mode()&os.ModeSymlink != 0 {
path, err = os.Readlink(path)
if err != nil {
return false
}
}
fmt.Println("\n Driver is available at", path)
fmt.Printf(" Checking for setuid bit ... ")
if fi.Mode()&os.ModeSetuid == 0 {
return false
}
return true
}
// checkKvmDriver returns true if KVM driver is available on path
func checkKvmDriver() bool {
path, err := exec.LookPath("docker-machine-driver-kvm")
if err != nil {
return false
}
	fmt.Printf("\n Driver is available at %s ... ", path)
return true
}
// checkHypervDriver returns true if Virtual Switch has been selected
func checkHypervDriver() bool {
switchEnv := os.Getenv("HYPERV_VIRTUAL_SWITCH")
if switchEnv == "" {
return false
}
return true
}
// checkInstanceIP makes sure the instance has an IPv4 address.
// HyperV will issue IPv6 addresses on Internal virtual switch
// https://github.com/minishift/minishift/issues/418
func checkInstanceIP(driver drivers.Driver) bool {
ip, err := driver.GetIP()
if err == nil && net.ParseIP(ip).To4() != nil {
return true
}
return false
}
// checkVMConnectivity checks if VM instance IP is reachable from the host
func checkVMConnectivity(driver drivers.Driver) bool {
// used to check if the host can reach the VM
ip, err := driver.GetIP()
if err != nil {
return false
}
// "-n 1" must be passed as two separate arguments; as one string ping
// receives a single malformed flag (Windows-style count flag assumed here)
cmd := exec.Command("ping", "-n", "1", ip)
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
return false
}
fmt.Printf("%s\n", stdoutStderr)
// ping succeeded, so the VM is reachable from the host
return true
}
// checkIPConnectivity checks if the VM has connectivity to the outside network
func checkIPConnectivity(driver drivers.Driver) bool {
ipToPing := viper.GetString(configCmd.CheckNetworkPingHost.Name)
if ipToPing == "" {
ipToPing = "8.8.8.8"
}
fmt.Printf("\n Pinging %s ... ", ipToPing)
return miniutil.IsIPReachable(driver, ipToPing, false)
}
// checkHttpConnectivity allows to test outside connectivity and possible proxy support
func checkHttpConnectivity(driver drivers.Driver) bool {
urlToRetrieve := viper.GetString(configCmd.CheckNetworkHttpHost.Name)
if urlToRetrieve == "" {
urlToRetrieve = "http://minishift.io/index.html"
}
fmt.Printf("\n Retrieving %s ... ", urlToRetrieve)
return miniutil.IsRetrievable(driver, urlToRetrieve, false)
}
// checkStorageMounted checks if the persistent storage volume, storageDisk, is
// mounted to the VM instance
func checkStorageMounted(driver drivers.Driver) bool {
mounted, _ := isMounted(driver, StorageDisk)
return mounted
}
// checkStorageUsage checks if the persistent storage volume has enough storage
// space available.
func checkStorageUsage(driver drivers.Driver) bool {
usedPercentage := getDiskUsage(driver, StorageDisk)
fmt.Printf("%s ", usedPercentage)
usedPercentage = strings.TrimRight(usedPercentage, "%")
usage, err := strconv.ParseInt(usedPercentage, 10, 8)
if err != nil {
return false
}
if usage > 80 && usage < 95 {
fmt.Printf("!!! ")
}
if usage < 95 {
return true
}
return false
}
// getDiskUsage returns the usage of a mountpoint known to the VM instance
func getDiskUsage(driver drivers.Driver, mountpoint string) string {
cmd := fmt.Sprintf(
"df -h %s | awk 'FNR > 1 {print $5}'",
mountpoint)
out, err := drivers.RunSSHCommandFromDriver(driver, cmd)
if err != nil {
return "ERR"
}
return strings.Trim(out, "\n")
}
// isMounted checks if mountpoint is mounted to the VM instance
func isMounted(driver drivers.Driver, mountpoint string) (bool, error) {
cmd := fmt.Sprintf(
"if grep -qs %s /proc/mounts; then echo '1'; else echo '0'; fi",
mountpoint)
out, err := drivers.RunSSHCommandFromDriver(driver, cmd)
if err != nil {
return false, err
}
if strings.Trim(out, "\n") == "0" {
return false, nil
}
return true, nil
}
|
[
"\"HYPERV_VIRTUAL_SWITCH\""
] |
[] |
[
"HYPERV_VIRTUAL_SWITCH"
] |
[]
|
["HYPERV_VIRTUAL_SWITCH"]
|
go
| 1 | 0 | |
src/TeamsNotifier.py
|
import os
import sys
import pymsteams
class TeamsNotifier:
def notification_type(self):
return "Teams"
def send(self, title, message):
if os.environ.get("WEBHOOK_URL") is None:
print("WEBHOOK_URL environment variable is not set")
sys.exit(1)
if os.environ.get("HTTPS_PROXY") is not None:
myTeamsMessage = pymsteams.connectorcard(os.environ.get("WEBHOOK_URL"), http_proxy=os.environ.get("HTTP_PROXY"), https_proxy=os.environ.get("HTTPS_PROXY"))
else:
myTeamsMessage = pymsteams.connectorcard(os.environ.get("WEBHOOK_URL"))
if title is not None:
myTeamsMessage.title(title)
if os.environ.get("MESSAGE_COLOR") is not None:
myTeamsMessage.color(os.environ.get("MESSAGE_COLOR"))
myTeamsMessage.text(message)
myTeamsMessage.send()
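# Hypothetical usage sketch (webhook URL and message text are illustrative):
#   os.environ["WEBHOOK_URL"] = "https://example.webhook.office.com/..."
#   TeamsNotifier().send("Deploy finished", "All services healthy")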
|
[] |
[] |
[
"MESSAGE_COLOR",
"HTTP_PROXY",
"WEBHOOK_URL",
"HTTPS_PROXY"
] |
[]
|
["MESSAGE_COLOR", "HTTP_PROXY", "WEBHOOK_URL", "HTTPS_PROXY"]
|
python
| 4 | 0 | |
main.go
|
package main
import (
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"regexp"
"time"
"gopkg.in/yaml.v2"
)
// Config is a struct for the config yaml parsing
type Config struct {
AtWorkPattern string `yaml:"at_work_pattern"`
OutputFileName string `yaml:"output_file"`
DefaultValues map[string]interface{} `yaml:"default_values"`
WorkingHours struct {
From int `yaml:"from"`
To int `yaml:"to"`
} `yaml:"working_hours"`
DateFormat string `yaml:"date_format"`
LogFileName string `yaml:"log_file"`
}
var c = Config{}
const day = 24 * time.Hour
func main() {
readConfig()
logs, err := os.OpenFile(c.LogFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("Error opening logfile: %v", err)
}
defer logs.Close()
logsOutput := io.MultiWriter(os.Stdout, logs)
log.SetOutput(logsOutput)
logTravelCosts()
}
func readConfig() {
home := os.Getenv("HOME")
f, err := ioutil.ReadFile(fmt.Sprintf("%s/.travelcosts.config.yml", home))
if err != nil {
log.Fatal("Failed reading config yaml!")
}
err = yaml.Unmarshal([]byte(f), &c)
if err != nil {
log.Fatal("Failed parsing config yaml!")
}
}
func logTravelCosts() {
if loggedToday() || !withinWorkingHours() {
return
}
_, err := os.Stat(c.OutputFileName)
// os.IsNotExist(err) is true when the file is missing, so negate it for existence
outputFileExists := !os.IsNotExist(err)
outputFile, err := os.OpenFile(c.OutputFileName, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Fatal("Failed opening output file!")
}
defer outputFile.Close()
w := csv.NewWriter(outputFile)
if !outputFileExists {
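// NOTE: Go map iteration order is randomized, so the header order here and
// the row order below are not guaranteed to match; sorting the keys once and
// reusing the slice would make the CSV columns stable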
outputHeaders := []string{"Datum"}
for k := range c.DefaultValues {
outputHeaders = append(outputHeaders, k)
}
w.Write(outputHeaders)
}
if atWork() {
row := []string{time.Now().Format(c.DateFormat)}
for k := range c.DefaultValues {
row = append(row, c.DefaultValues[k].(string))
}
w.Write(row)
}
w.Flush()
}
func atWork() bool {
cmd := exec.Command(
"/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport",
"-I",
)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
airportInfo, err := ioutil.ReadAll(stdout)
if err != nil {
log.Fatal(err)
}
ssidRegex := regexp.MustCompile(`\sSSID:\s+(?P<network>\S.*)\n`)
match := ssidRegex.FindSubmatch(airportInfo)
if match == nil {
log.Fatal("Failed parsing SSID output!")
}
ssid := match[1]
atWorkRegex := regexp.MustCompile(c.AtWorkPattern)
atWork := atWorkRegex.Match(ssid)
log.Printf("Network: %s, at work: %t", ssid, atWork)
return atWork
}
func loggedToday() bool {
f, err := os.Open(c.OutputFileName)
if err != nil {
log.Fatalf("Could not open %s, Error: %s", c.OutputFileName, err)
}
defer f.Close()
reader := csv.NewReader(f)
records, err := reader.ReadAll()
if err != nil {
log.Fatalf("Failed reading %s", c.OutputFileName)
}
lastEntry := records[len(records)-1]
if lastEntry[0] == "Datum" {
return false
}
date, err := time.Parse(c.DateFormat, lastEntry[0])
if err != nil {
log.Fatalf("Failed parsing time %s with format %s", lastEntry[0], c.DateFormat)
}
if time.Now().Truncate(day).Equal(date.Truncate(day)) {
return true
}
return false
}
func withinWorkingHours() bool {
hour := time.Now().Hour()
working := hour >= c.WorkingHours.From && hour <= c.WorkingHours.To
log.Printf("Working: %t", working)
return working
}
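// Example ~/.travelcosts.config.yml matching the Config struct above;
// all values are illustrative:
//
//	at_work_pattern: "^OfficeWiFi$"
//	output_file: /Users/me/travelcosts.csv
//	log_file: /Users/me/travelcosts.log
//	date_format: "02.01.2006"
//	working_hours:
//	  from: 8
//	  to: 18
//	default_values:
//	  Kosten: "5.20"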
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
e2e_test.go
|
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
gourl "net/url"
"os"
"path"
"reflect"
"strings"
"testing"
"time"
log "github.com/sirupsen/logrus"
"github.com/gofrs/uuid"
"github.com/ismrmrd/mrd-storage-server/api"
"github.com/ismrmrd/mrd-storage-server/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
db core.MetadataDatabase
blobStore core.BlobStore
router http.Handler
remoteUrl *gourl.URL
)
func init() {
log.SetOutput(ioutil.Discard)
if remoteUrlVar := os.Getenv("TEST_REMOTE_URL"); remoteUrlVar != "" {
var err error
remoteUrl, err = gourl.Parse(remoteUrlVar)
if err != nil {
log.Fatalf("Invalid TEST_REMOTE_URL value")
}
return
}
// Put the server in a non-UTC time zone so that we
// can verify that times are always returned in UTC and not the server's time zone.
time.Local = time.FixedZone("MyTimeZone", 3600)
config := loadConfig()
config.LogRequests = false
dbProvider := os.Getenv("TEST_DB_PROVIDER")
switch dbProvider {
case ConfigDatabaseProviderPostgresql:
config.DatabaseProvider = ConfigDatabaseProviderPostgresql
config.DatabaseConnectionString = "user=mrd password=mrd dbname=mrd host=localhost port=9920 sslmode=disable"
case "", ConfigDatabaseProviderSqlite:
// use defaults
default:
log.Fatalf("Unrecognized TEST_DB_PROVIDER environment variable '%s'", dbProvider)
}
storageProvider := os.Getenv("TEST_STORAGE_PROVIDER")
switch storageProvider {
case ConfigStorageProviderAzureBlob:
config.StorageProvider = ConfigStorageProviderAzureBlob
config.StorageConnectionString = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://localhost:10000/devstoreaccount1;"
case "", ConfigStorageProviderFileSystem:
// use defaults
default:
log.Fatalf("Unrecognized TEST_STORAGE_PROVIDER environment variable '%s'", storageProvider)
}
var err error
db, blobStore, err = assembleDataStores(config)
if err != nil {
log.Fatal(err)
}
router = assembleHandler(db, blobStore, config)
}
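// Test wiring is selected via environment variables, e.g. (a sketch; the
// provider strings must match the Config* constants defined in this package):
//
//	TEST_REMOTE_URL=http://localhost:3333 go test ./...        # run against a live server
//	TEST_DB_PROVIDER=... TEST_STORAGE_PROVIDER=... go test ./... # pick in-proc backends
//
// With none of these set, the suite runs in-proc against the SQLite and
// filesystem defaults.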
func TestInvalidTags(t *testing.T) {
cases := []struct {
name string
query string
}{
{"tag name with leading underscore", "subject=s&_r=s"},
{"tag name with unsupported char", "subject=s&a*=s"},
{"tag name that is too long", fmt.Sprintf("subject=sub&%s=abc", strings.Repeat("a", 65))},
{"Location", "subject=s&location=l"},
{"Last-Modified", "subject=s&lastModified=2021-10-18T16:56:15.693Z"},
{"Many Subject tags", "subject=s&subject=s2"},
{"Subject empty", "subject="},
{"No subject tag", ""},
{"Many Device tags", "subject=s&device=d1&device=d2"},
{"Many Name tags", "subject=s&name=n1&name=n2"},
{"Many Session tags", "subject=s&session=s1&session=s2"},
{"Tag value too long", fmt.Sprintf("subject=sub&a=%s", strings.Repeat("a", 200))},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
r := create(t, c.query, "text-plain", "hello")
assert.Equal(t, http.StatusBadRequest, r.StatusCode)
assert.NotNil(t, r.ErrorResponse)
})
}
}
func TestCreateValidBlob(t *testing.T) {
bodyContents := "this is the body"
subject := fmt.Sprint(time.Now().UnixNano())
// Create the blob
createResp := create(t, fmt.Sprintf("subject=%s&name=myname&device=mydevice", subject), "", bodyContents)
require.Equal(t, http.StatusCreated, createResp.StatusCode)
// now read the blob using the Location header in the response
readResp := read(t, createResp.Data)
assert.Equal(t, http.StatusOK, readResp.StatusCode)
assert.Equal(t, bodyContents, readResp.Body)
assert.Equal(t, "application/octet-stream", *readResp.Tags.ContentType)
assert.NotNil(t, readResp.CreatedAt)
assert.Equal(t, subject, readResp.Subject)
assert.Equal(t, "myname", *readResp.Tags.Name)
assert.Equal(t, "mydevice", *readResp.Tags.Device)
assert.Nil(t, readResp.Tags.Session)
searchResp := search(t, "subject="+subject)
require.Equal(t, http.StatusOK, searchResp.StatusCode)
assert.Len(t, searchResp.Results.Items, 1)
}
func TestCreateValidBlobCustomTags(t *testing.T) {
bodyContents := "this is the body"
subject := fmt.Sprint(time.Now().UnixNano())
// Create the blob
createResp := create(
t,
fmt.Sprintf("subject=%s&session=mysession&customtag1=customTag1Value&customTag2=customTag2Value1&customTag2=customTag2Value2", subject),
"text/plain",
bodyContents)
require.Equal(t, http.StatusCreated, createResp.StatusCode)
readResp := read(t, createResp.Data)
assert.Equal(t, http.StatusOK, readResp.StatusCode)
assert.Equal(t, bodyContents, readResp.Body)
assert.Equal(t, "text/plain", *readResp.Tags.ContentType)
assert.NotNil(t, readResp.CreatedAt)
assert.Equal(t, subject, readResp.Subject)
assert.Equal(t, "mysession", *readResp.Tags.Session)
assert.ElementsMatch(t, []string{"customTag1Value"}, readResp.Tags.CustomTags["Customtag1"])
assert.ElementsMatch(t, []string{"customTag2Value1", "customTag2Value2"}, readResp.Tags.CustomTags["Customtag2"])
searchResp := search(t, fmt.Sprintf("subject=%s&CustomTag2=customTag2Value1", subject))
assert.Equal(t, http.StatusOK, searchResp.StatusCode)
assert.Len(t, searchResp.Results.Items, 1)
searchResp = search(t, fmt.Sprintf("subject=%s&CustomTag2=customTag2Value1&CustomTag2=missing", subject))
assert.Equal(t, http.StatusOK, searchResp.StatusCode)
assert.Empty(t, searchResp.Results.Items)
searchResp = search(t, fmt.Sprintf("subject=%s&CustomTag2=customTag2Value1&CustomTag2=customTag2Value2", subject))
assert.Equal(t, http.StatusOK, searchResp.StatusCode)
assert.Len(t, searchResp.Results.Items, 1)
}
func TestCreateResponse(t *testing.T) {
body := "these are some bytes"
subject := "$null"
response := create(t, fmt.Sprintf("subject=%s&session=mysession", subject), "text/plain", body)
require.Equal(t, http.StatusCreated, response.StatusCode)
assert.NotNil(t, response.Meta["lastModified"])
assert.Equal(t, "text/plain", response.Meta["contentType"])
assert.Equal(t, "mysession", response.Meta["session"])
assert.Equal(t, "$null", response.Meta["subject"])
assert.Nil(t, response.Meta["name"])
assert.NotNil(t, response.Meta["location"])
assert.NotNil(t, response.Meta["data"])
}
func TestCreateResponseCustomTags(t *testing.T) {
body := "these are some bytes"
subject := fmt.Sprint(time.Now().UnixNano())
// Create the blob
response := create(
t,
fmt.Sprintf("subject=%s&session=mysession&customtag1=customTag1Value&customTag2=customTag2Value1&customTag2=customTag2Value2", subject),
"text/plain",
body)
require.Equal(t, http.StatusCreated, response.StatusCode)
require.Equal(t, "customTag1Value", response.Meta["customtag1"])
require.ElementsMatch(t, []string{"customTag2Value1", "customTag2Value2"}, response.Meta["customtag2"])
}
func TestCreateResponseMatchesBlobMeta(t *testing.T) {
body := "these are some bytes"
subject := "$null"
createResponse := create(t, fmt.Sprintf("subject=%s&session=mysession", subject), "text/plain", body)
require.Equal(t, http.StatusCreated, createResponse.StatusCode)
location, err := gourl.Parse(createResponse.Location)
require.Nil(t, err)
resp, err := executeRequest("GET", location.Path, nil, nil)
require.Nil(t, err)
readResponse := createMetaResponse(resp)
require.Equal(t, http.StatusOK, readResponse.StatusCode)
require.True(t, reflect.DeepEqual(createResponse.Meta, readResponse.Meta))
}
func TestSearchPaging(t *testing.T) {
subject := fmt.Sprint(time.Now().UnixNano())
// create several blobs with the same subject
totalItems := 10
originalQuery := fmt.Sprintf("subject=%s&mytag=t", subject)
for i := 0; i < totalItems; i++ {
require.Equal(t, http.StatusCreated, create(t, originalQuery, "", "").StatusCode)
}
for _, pageSize := range []int{3, 5, 8, 10, 11} {
t.Run(fmt.Sprintf("page size %d", pageSize), func(t *testing.T) {
link := fmt.Sprintf("/v1/blobs?subject=%s&mytag=t&_limit=%d", subject, pageSize)
items := make(map[string]bool)
for link != "" {
resp := search(t, link[strings.Index(link, "?")+1:])
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.LessOrEqual(t, len(resp.Results.Items), pageSize)
for _, v := range resp.Results.Items {
location := v["location"].(string)
assert.NotContains(t, items, location)
items[location] = true
}
link = resp.Results.NextLink
}
assert.Equal(t, len(items), totalItems)
})
}
// now verify the behavior of the _at parameter for searches and get latest calls
fullResults := search(t, originalQuery+"&_limit=0") // <= 0 should be ignored
assert.Empty(t, fullResults.Results.NextLink)
for i := 1; i < len(fullResults.Results.Items); i++ {
previousResult := fullResults.Results.Items[i-1]
thisResult := fullResults.Results.Items[i]
if prevTime, thisTime := previousResult["lastModified"].(string), thisResult["lastModified"].(string); prevTime != thisTime {
assert.Regexp(t, "Z$", thisTime, "Datetime in response not in UTC")
atQuery := fmt.Sprintf("%s&_at=%s", originalQuery, gourl.QueryEscape(thisTime))
atRes := search(t, atQuery)
require.Equal(t, http.StatusOK, atRes.StatusCode)
assert.Equal(t, thisResult["location"].(string), atRes.Results.Items[0]["location"].(string))
latestResponse := getLatestBlob(t, atQuery)
require.Equal(t, http.StatusOK, latestResponse.StatusCode)
assert.Equal(t, thisResult["location"].(string), latestResponse.Location)
}
}
}
func TestInvalidSearches(t *testing.T) {
cases := []string{
"a=a",
"subject=x&_ct=3",
"subject=x&_ct=_ct=eyJ0cyI6MTYzNDU3NjE3NzA4MH0&_ct=eyJ0cyI6MTYzNDU3NjE3NzA4MH0",
"subject=x&_at=foobar",
"subject=x&_at=2021",
"subject=x&_at=2021-10-18T16:56:15.693Z&_at=2021-10-18T16:56:15.693Z",
}
for _, c := range cases {
t.Run(c, func(t *testing.T) {
resp := search(t, c)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
}
func TestTagCaseSensitivity(t *testing.T) {
subject := fmt.Sprintf("S-%d", time.Now().UnixNano())
query := fmt.Sprintf("subject=%s&name=MYNAME&mytag=TAGVALUE1&MYTAG=TAGVALUE2", subject)
createResp := create(t, query, "", "")
require.Equal(t, http.StatusCreated, createResp.StatusCode)
assert.Empty(t, search(t, strings.ToLower(query)).Results.Items)
assert.NotEmpty(t, search(t, fmt.Sprintf("subject=%s&name=MYNAME&mytag=TAGVALUE1", subject)).Results.Items)
assert.NotEmpty(t, search(t, fmt.Sprintf("SUBJECT=%s&name=MYNAME&mytag=TAGVALUE1", subject)).Results.Items)
assert.NotEmpty(t, search(t, fmt.Sprintf("subject=%s&name=MYNAME&MYTAG=TAGVALUE1", subject)).Results.Items)
assert.Empty(t, search(t, fmt.Sprintf("subject=%s&name=MYNAME&mytag=TAGVALUE1", strings.ToLower(subject))).Results.Items)
assert.Empty(t, search(t, fmt.Sprintf("subject=%s&name=MYNAME&mytag=tagvalue1", subject)).Results.Items)
}
func TestUnicodeTags(t *testing.T) {
subject := fmt.Sprintf("S-%d", time.Now().UnixNano())
query := fmt.Sprintf("subject=%s&name=😁&mytag=😀", subject)
createResp := create(t, query, "", "")
require.Equal(t, http.StatusCreated, createResp.StatusCode)
items := search(t, query).Results.Items
require.NotEmpty(t, items)
assert.Equal(t, "😁", items[0]["name"].(string))
assert.Equal(t, "😀", items[0]["mytag"].(string))
readResponse := read(t, createResp.Data)
assert.Equal(t, "😁", *readResponse.Tags.Name)
assert.Equal(t, "😀", readResponse.Tags.CustomTags["Mytag"][0])
}
func Test404(t *testing.T) {
cases := []string{
"/",
fmt.Sprintf("/v1/blobs/latest?subject=%d", time.Now().UnixNano()),
"/v1/blobs/abc",
}
for _, c := range cases {
t.Run(c, func(t *testing.T) {
resp, err := executeRequest("GET", c, nil, nil)
require.Nil(t, err)
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
})
}
}
func TestNullSubject(t *testing.T) {
device := fmt.Sprint(time.Now().UnixNano())
query := "subject=$null&device=" + device
createResp := create(t, query, "", "hello")
readResp := read(t, createResp.Data)
assert.Equal(t, "hello", readResp.Body)
latestResp := getLatestBlob(t, query)
assert.Equal(t, "hello", latestResp.Body)
}
func search(t *testing.T, queryString string) SearchResponse {
resp, err := executeRequest("GET", fmt.Sprintf("/v1/blobs?%s", queryString), nil, nil)
require.Nil(t, err)
searchResponse := SearchResponse{}
searchResponse.RawResponse = resp
searchResponse.StatusCode = resp.StatusCode
searchResponseBody, _ := ioutil.ReadAll(resp.Body)
successResponse := api.SearchResponse{}
if json.Unmarshal(searchResponseBody, &successResponse) == nil {
searchResponse.Results = &successResponse
} else {
errorResponse := api.ErrorResponse{}
if json.Unmarshal(searchResponseBody, &errorResponse) == nil {
searchResponse.ErrorResponse = &errorResponse
}
}
return searchResponse
}
func create(t *testing.T, queryString, contentType, content string) MetaResponse {
var headers http.Header = nil
if contentType != "" {
headers = http.Header{}
headers.Set("Content-Type", contentType)
}
resp, err := executeRequest("POST", fmt.Sprintf("/v1/blobs/data?%s", queryString), headers, strings.NewReader(content))
require.Nil(t, err)
return createMetaResponse(resp)
}
func read(t *testing.T, url string) ReadResponse {
resp, err := executeRequest("GET", url, nil, nil)
require.Nil(t, err)
return populateBlobResponse(t, resp)
}
func createMetaResponse(resp *http.Response) MetaResponse {
response := MetaResponse{}
response.RawResponse = resp
response.StatusCode = resp.StatusCode
body, _ := ioutil.ReadAll(resp.Body)
errorResponse := api.ErrorResponse{}
if json.Unmarshal(body, &errorResponse) == nil {
response.ErrorResponse = &errorResponse
}
goodStatusCodes := map[int]bool{http.StatusCreated: true, http.StatusOK: true}
created := make(map[string]interface{})
if json.Unmarshal(body, &created) == nil && goodStatusCodes[resp.StatusCode] {
response.Meta = created
response.Location = created["location"].(string)
response.Data = created["data"].(string)
}
return response
}
func getLatestBlob(t *testing.T, queryString string) GetLatestResponse {
resp, err := executeRequest("GET", fmt.Sprintf("/v1/blobs/data/latest?%s", queryString), nil, nil)
require.Nil(t, err)
return GetLatestResponse{
ReadResponse: populateBlobResponse(t, resp),
Location: resp.Header.Get("Location"),
}
}
func populateBlobResponse(t *testing.T, resp *http.Response) ReadResponse {
readResponse := ReadResponse{}
readResponse.Tags.CustomTags = make(map[string][]string)
readResponse.RawResponse = resp
readResponse.StatusCode = resp.StatusCode
body, _ := ioutil.ReadAll(resp.Body)
readResponse.Body = string(body)
errorResponse := api.ErrorResponse{}
if json.Unmarshal(body, &errorResponse) == nil {
readResponse.ErrorResponse = &errorResponse
}
headers := resp.Header
if subject, ok := headers[api.TagHeaderName("Subject")]; ok {
assert.Len(t, subject, 1)
readResponse.Subject = subject[0]
delete(headers, "Subject")
}
if contentType, ok := headers["Content-Type"]; ok {
assert.Len(t, contentType, 1)
readResponse.Tags.ContentType = &contentType[0]
delete(headers, "Content-Type")
}
if lastModified, ok := headers["Last-Modified"]; ok {
assert.Len(t, lastModified, 1)
t, _ := time.Parse(http.TimeFormat, lastModified[0])
readResponse.CreatedAt = &t
delete(headers, "Last-Modified")
}
reflectionTags := reflect.ValueOf(&readResponse.Tags).Elem()
for k, v := range headers {
if !strings.HasPrefix(k, api.TagHeaderPrefix) {
continue
}
tagName := k[len(api.TagHeaderPrefix):]
f := reflectionTags.FieldByName(tagName)
if f.IsValid() {
tagValue := v[0]
if f.Kind() == reflect.Ptr {
f.Set(reflect.ValueOf(&tagValue))
} else {
f.SetString(tagValue)
}
} else {
readResponse.Tags.CustomTags[tagName] = v
}
}
return readResponse
}
func executeRequest(method string, url string, headers http.Header, body io.Reader) (*http.Response, error) {
if remoteUrl == nil {
request := httptest.NewRequest(method, url, body)
if headers != nil {
request.Header = headers
}
resp := httptest.NewRecorder()
router.ServeHTTP(resp, request)
return resp.Result(), nil
}
parsedUrl, err := gourl.Parse(url)
if err != nil {
return nil, err
}
fullUrl := url
if !parsedUrl.IsAbs() {
parsedFullUrl := *remoteUrl
parsedFullUrl.Path = path.Join(parsedFullUrl.Path, parsedUrl.Path)
parsedFullUrl.RawQuery = parsedUrl.RawQuery
fullUrl = parsedFullUrl.String()
}
request, err := http.NewRequest(method, fullUrl, body)
if err != nil {
return nil, err
}
if headers != nil {
request.Header = headers
}
return http.DefaultClient.Do(request)
}
func TestGarbageCollection(t *testing.T) {
if remoteUrl != nil {
// this test only works in-proc
return
}
keys := []core.BlobKey{
createKey(t, "s1"),
createKey(t, "s2"),
createKey(t, "s3"),
}
for _, key := range keys {
_, err := db.StageBlobMetadata(context.Background(), key, &core.BlobTags{})
require.Nil(t, err)
blobStore.SaveBlob(context.Background(), http.NoBody, key)
}
olderThan := time.Now().Add(time.Minute).UTC()
err := core.CollectGarbage(context.Background(), db, blobStore, olderThan)
require.Nil(t, err)
for _, key := range keys {
err := blobStore.ReadBlob(context.Background(), io.Discard, key)
assert.ErrorIs(t, err, core.ErrBlobNotFound)
assert.Nil(t, blobStore.DeleteBlob(context.Background(), key))
}
}
func TestStagedBlobsAreNotVisible(t *testing.T) {
if remoteUrl != nil {
// this test only works in-proc
return
}
subject := fmt.Sprint(time.Now().UnixNano())
key := createKey(t, subject)
tags := core.BlobTags{CustomTags: make(map[string][]string)}
_, err := db.StageBlobMetadata(context.Background(), key, &tags)
require.Nil(t, err)
err = blobStore.SaveBlob(context.Background(), http.NoBody, key)
require.Nil(t, err)
query := fmt.Sprintf("subject=%s", subject)
searchResponse := search(t, query)
assert.Empty(t, searchResponse.Results.Items)
latestResponse := getLatestBlob(t, query)
assert.Equal(t, http.StatusNotFound, latestResponse.StatusCode)
err = db.CompleteStagedBlobMetadata(context.Background(), key)
require.Nil(t, err)
searchResponse = search(t, query)
assert.Len(t, searchResponse.Results.Items, 1)
latestResponse = getLatestBlob(t, query)
assert.Equal(t, http.StatusOK, latestResponse.StatusCode)
}
func createKey(t *testing.T, subject string) core.BlobKey {
id, err := uuid.NewV4()
require.Nil(t, err)
return core.BlobKey{Subject: subject, Id: id}
}
type Response struct {
StatusCode int
RawResponse *http.Response
ErrorResponse *api.ErrorResponse
}
type SearchResponse struct {
Response
Results *api.SearchResponse
}
type MetaResponse struct {
Response
Location string
Data string
Meta map[string]interface{}
}
type ReadResponse struct {
Response
CreatedAt *time.Time
Body string
Subject string
Tags core.BlobTags
}
type GetLatestResponse struct {
ReadResponse
Location string
}
|
[
"\"TEST_REMOTE_URL\"",
"\"TEST_DB_PROVIDER\"",
"\"TEST_STORAGE_PROVIDER\""
] |
[] |
[
"TEST_STORAGE_PROVIDER",
"TEST_REMOTE_URL",
"TEST_DB_PROVIDER"
] |
[]
|
["TEST_STORAGE_PROVIDER", "TEST_REMOTE_URL", "TEST_DB_PROVIDER"]
|
go
| 3 | 0 | |
fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java
|
// This file is made available under Elastic License 2.0.
// This file is based on code available under the Apache license here:
// https://github.com/apache/incubator-doris/blob/master/fe/fe-core/src/test/java/org/apache/doris/utframe/UtFrameUtils.java
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.starrocks.utframe;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.starrocks.analysis.Analyzer;
import com.starrocks.analysis.CreateViewStmt;
import com.starrocks.analysis.InsertStmt;
import com.starrocks.analysis.SetVar;
import com.starrocks.analysis.SqlParser;
import com.starrocks.analysis.SqlScanner;
import com.starrocks.analysis.StatementBase;
import com.starrocks.analysis.StringLiteral;
import com.starrocks.analysis.UserIdentity;
import com.starrocks.catalog.Database;
import com.starrocks.catalog.DiskInfo;
import com.starrocks.catalog.OlapTable;
import com.starrocks.common.AnalysisException;
import com.starrocks.common.ClientPool;
import com.starrocks.common.Config;
import com.starrocks.common.DdlException;
import com.starrocks.common.Pair;
import com.starrocks.common.util.SqlParserUtils;
import com.starrocks.mysql.privilege.Auth;
import com.starrocks.planner.PlanFragment;
import com.starrocks.qe.ConnectContext;
import com.starrocks.qe.SessionVariable;
import com.starrocks.qe.VariableMgr;
import com.starrocks.server.GlobalStateMgr;
import com.starrocks.sql.InsertPlanner;
import com.starrocks.sql.StatementPlanner;
import com.starrocks.sql.analyzer.AnalyzerUtils;
import com.starrocks.sql.analyzer.SemanticException;
import com.starrocks.sql.ast.QueryStatement;
import com.starrocks.sql.ast.SelectRelation;
import com.starrocks.sql.common.SqlDigestBuilder;
import com.starrocks.sql.optimizer.OperatorStrings;
import com.starrocks.sql.optimizer.OptExpression;
import com.starrocks.sql.optimizer.Optimizer;
import com.starrocks.sql.optimizer.base.ColumnRefFactory;
import com.starrocks.sql.optimizer.base.ColumnRefSet;
import com.starrocks.sql.optimizer.base.PhysicalPropertySet;
import com.starrocks.sql.optimizer.dump.MockDumpInfo;
import com.starrocks.sql.optimizer.dump.QueryDumpInfo;
import com.starrocks.sql.optimizer.statistics.ColumnStatistic;
import com.starrocks.sql.optimizer.transformer.LogicalPlan;
import com.starrocks.sql.optimizer.transformer.RelationTransformer;
import com.starrocks.sql.parser.ParsingException;
import com.starrocks.sql.plan.ExecPlan;
import com.starrocks.sql.plan.PlanFragmentBuilder;
import com.starrocks.statistic.Constants;
import com.starrocks.system.Backend;
import com.starrocks.system.SystemInfoService;
import com.starrocks.thrift.TExplainLevel;
import com.starrocks.utframe.MockedFrontend.EnvVarNotSetException;
import com.starrocks.utframe.MockedFrontend.FeStartException;
import com.starrocks.utframe.MockedFrontend.NotInitException;
import org.apache.commons.codec.binary.Hex;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.net.ServerSocket;
import java.nio.channels.FileLock;
import java.nio.file.Files;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static com.starrocks.sql.plan.PlanTestBase.setPartitionStatistics;
public class UtFrameUtils {
private final static AtomicInteger INDEX = new AtomicInteger(0);
private final static AtomicBoolean CREATED_MIN_CLUSTER = new AtomicBoolean(false);
public static final String createStatisticsTableStmt = "CREATE TABLE `table_statistic_v1` (\n" +
" `table_id` bigint(20) NOT NULL COMMENT \"\",\n" +
" `column_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `db_id` bigint(20) NOT NULL COMMENT \"\",\n" +
" `table_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `db_name` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `row_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `data_size` bigint(20) NOT NULL COMMENT \"\",\n" +
" `distinct_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `null_count` bigint(20) NOT NULL COMMENT \"\",\n" +
" `max` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `min` varchar(65530) NOT NULL COMMENT \"\",\n" +
" `update_time` datetime NOT NULL COMMENT \"\"\n" +
") ENGINE=OLAP\n" +
"UNIQUE KEY(`table_id`, `column_name`, `db_id`)\n" +
"COMMENT \"OLAP\"\n" +
"DISTRIBUTED BY HASH(`table_id`, `column_name`, `db_id`) BUCKETS 10\n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\",\n" +
"\"in_memory\" = \"false\",\n" +
"\"storage_format\" = \"DEFAULT\"\n" +
");";
// Help to create a mocked ConnectContext.
public static ConnectContext createDefaultCtx() throws IOException {
ConnectContext ctx = new ConnectContext(null);
ctx.setCluster(SystemInfoService.DEFAULT_CLUSTER);
ctx.setCurrentUserIdentity(UserIdentity.ROOT);
ctx.setQualifiedUser(Auth.ROOT_USER);
ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState());
ctx.setThreadLocalInfo();
ctx.setDumpInfo(new MockDumpInfo());
return ctx;
}
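// Hypothetical usage in a planner test (the SQL text is illustrative):
//   UtFrameUtils.createMinStarRocksCluster();
//   ConnectContext ctx = UtFrameUtils.createDefaultCtx();
//   Pair<String, ExecPlan> plan = UtFrameUtils.getPlanAndFragment(ctx, "select 1");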
// Parse an origin stmt. Return a StatementBase instance.
public static StatementBase parseStmtWithNewParser(String originStmt, ConnectContext ctx)
throws Exception {
StatementBase statementBase;
try {
statementBase =
com.starrocks.sql.parser.SqlParser.parse(originStmt, ctx.getSessionVariable().getSqlMode()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, ctx);
} catch (ParsingException | SemanticException e) {
System.err.println("parse failed: " + e.getMessage());
if (e.getMessage() == null) {
throw e;
} else {
throw new AnalysisException(e.getMessage(), e);
}
}
return statementBase;
}
// Parse an origin stmt and analyze it. Return a StatementBase instance.
public static StatementBase parseAndAnalyzeStmt(String originStmt, ConnectContext ctx)
throws Exception {
SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
Analyzer analyzer = new Analyzer(ctx.getGlobalStateMgr(), ctx);
StatementBase statementBase = null;
try {
statementBase = SqlParserUtils.getFirstStmt(parser);
} catch (AnalysisException e) {
String errorMessage = parser.getErrorMsg(originStmt);
System.err.println("parse failed: " + errorMessage);
if (errorMessage == null) {
throw e;
} else {
throw new AnalysisException(errorMessage, e);
}
}
statementBase.analyze(analyzer);
return statementBase;
}
// for analyzing multi statements
public static List<StatementBase> parseAndAnalyzeStmts(String originStmt, ConnectContext ctx) throws Exception {
System.out.println("begin to parse stmts: " + originStmt);
SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
SqlParser parser = new SqlParser(input);
Analyzer analyzer = new Analyzer(ctx.getGlobalStateMgr(), ctx);
List<StatementBase> statementBases = null;
try {
statementBases = SqlParserUtils.getMultiStmts(parser);
} catch (AnalysisException e) {
String errorMessage = parser.getErrorMsg(originStmt);
System.err.println("parse failed: " + errorMessage);
if (errorMessage == null) {
throw e;
} else {
throw new AnalysisException(errorMessage, e);
}
}
for (StatementBase stmt : statementBases) {
stmt.analyze(analyzer);
}
return statementBases;
}
private static void startFEServer(String runningDir, boolean startBDB) throws EnvVarNotSetException, IOException,
FeStartException, NotInitException {
// get STARROCKS_HOME
String starRocksHome = System.getenv("STARROCKS_HOME");
if (Strings.isNullOrEmpty(starRocksHome)) {
starRocksHome = Files.createTempDirectory("STARROCKS_HOME").toAbsolutePath().toString();
}
Config.plugin_dir = starRocksHome + "/plugins";
// start fe in "STARROCKS_HOME/fe/mocked/"
MockedFrontend frontend = MockedFrontend.getInstance();
Map<String, String> feConfMap = Maps.newHashMap();
// set additional fe config
if (startBDB) {
feConfMap.put("edit_log_port", String.valueOf(findValidPort()));
}
feConfMap.put("tablet_create_timeout_second", "10");
frontend.init(starRocksHome + "/" + runningDir, feConfMap);
frontend.start(startBDB, new String[0]);
}
public synchronized static void createMinStarRocksCluster(boolean startBDB) {
// to avoid calling createMinStarRocksCluster multiple times
if (CREATED_MIN_CLUSTER.get()) {
return;
}
try {
ClientPool.heartbeatPool = new MockGenericPool.HeatBeatPool("heartbeat");
ClientPool.backendPool = new MockGenericPool.BackendThriftPool("backend");
startFEServer("fe/mocked/test/" + UUID.randomUUID().toString() + "/", startBDB);
addMockBackend(10001);
// sleep to wait for the first heartbeat
int retry = 0;
while (GlobalStateMgr.getCurrentSystemInfo().getBackend(10001).getBePort() == -1 &&
retry++ < 600) {
Thread.sleep(100);
}
CREATED_MIN_CLUSTER.set(true);
} catch (Exception e) {
e.printStackTrace();
}
}
public static void createMinStarRocksCluster() {
createMinStarRocksCluster(false);
}
public static void addMockBackend(int backendId) throws Exception {
// start be
MockedBackend backend = new MockedBackend("127.0.0.1");
// add be
Backend be = new Backend(backendId, backend.getHost(), backend.getHeartBeatPort());
Map<String, DiskInfo> disks = Maps.newHashMap();
DiskInfo diskInfo1 = new DiskInfo(backendId + "/path1");
diskInfo1.setTotalCapacityB(1000000);
diskInfo1.setAvailableCapacityB(500000);
diskInfo1.setDataUsedCapacityB(480000);
disks.put(diskInfo1.getRootPath(), diskInfo1);
be.setDisks(ImmutableMap.copyOf(disks));
be.setAlive(true);
be.setOwnerClusterName(SystemInfoService.DEFAULT_CLUSTER);
be.setBePort(backend.getBeThriftPort());
be.setBrpcPort(backend.getBrpcPort());
be.setHttpPort(backend.getHttpPort());
GlobalStateMgr.getCurrentSystemInfo().addBackend(be);
}
public static void dropMockBackend(int backendId) throws DdlException {
GlobalStateMgr.getCurrentSystemInfo().dropBackend(backendId);
}
public static int findValidPort() {
String starRocksHome = System.getenv("STARROCKS_HOME");
File portDir = new File(starRocksHome + "/fe/ut_ports");
if (!portDir.exists()) {
Preconditions.checkState(portDir.mkdirs());
}
for (int i = 0; i < 10; i++) {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
int port = socket.getLocalPort();
File file = new File(starRocksHome + "/fe/ut_ports/" + port);
if (file.exists()) {
continue;
}
RandomAccessFile accessFile = new RandomAccessFile(file, "rws");
FileLock lock = accessFile.getChannel().tryLock();
if (lock == null) {
continue;
}
System.out.println("find valid port " + port + new Date());
return port;
} catch (Exception e) {
e.printStackTrace();
throw new IllegalStateException("Could not find a free TCP/IP port " + e.getMessage());
}
}
throw new RuntimeException("cannot find a valid port");
}
public static Pair<String, ExecPlan> getPlanAndFragment(ConnectContext connectContext, String originStmt)
throws Exception {
connectContext.setDumpInfo(new QueryDumpInfo(connectContext.getSessionVariable()));
List<StatementBase> statements =
com.starrocks.sql.parser.SqlParser.parse(originStmt, connectContext.getSessionVariable().getSqlMode());
connectContext.getDumpInfo().setOriginStmt(originStmt);
SessionVariable oldSessionVariable = connectContext.getSessionVariable();
StatementBase statementBase = statements.get(0);
try {
// update session variable by adding optional hints.
if (statementBase instanceof QueryStatement &&
((QueryStatement) statementBase).getQueryRelation() instanceof SelectRelation) {
SelectRelation selectRelation = (SelectRelation) ((QueryStatement) statementBase).getQueryRelation();
Map<String, String> optHints = selectRelation.getSelectList().getOptHints();
if (optHints != null) {
SessionVariable sessionVariable = (SessionVariable) oldSessionVariable.clone();
for (String key : optHints.keySet()) {
VariableMgr.setVar(sessionVariable, new SetVar(key, new StringLiteral(optHints.get(key))),
true);
}
connectContext.setSessionVariable(sessionVariable);
}
}
ExecPlan execPlan = new StatementPlanner().plan(statementBase, connectContext);
if (statementBase instanceof QueryStatement && !connectContext.getDatabase().isEmpty() &&
!statementBase.isExplain()) {
String viewName = "view" + INDEX.getAndIncrement();
String createView = "create view " + viewName + " as " + originStmt;
CreateViewStmt createTableStmt =
(CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(createView, connectContext);
try {
StatementBase viewStatement =
com.starrocks.sql.parser.SqlParser.parse(createTableStmt.getInlineViewDef(),
connectContext.getSessionVariable().getSqlMode()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(viewStatement, connectContext);
} catch (Exception e) {
System.out.println(e.getMessage());
throw e;
}
}
OperatorStrings operatorPrinter = new OperatorStrings();
return new Pair<>(operatorPrinter.printOperator(execPlan.getPhysicalPlan()), execPlan);
} finally {
// before returning we have to restore session variable.
connectContext.setSessionVariable(oldSessionVariable);
}
}
public static String getStmtDigest(ConnectContext connectContext, String originStmt) throws Exception {
StatementBase statementBase =
com.starrocks.sql.parser.SqlParser.parse(originStmt, connectContext.getSessionVariable().getSqlMode())
.get(0);
Preconditions.checkState(statementBase instanceof QueryStatement);
QueryStatement queryStmt = (QueryStatement) statementBase;
String digest = SqlDigestBuilder.build(queryStmt);
try {
MessageDigest md = MessageDigest.getInstance("MD5");
md.reset();
md.update(digest.getBytes());
return Hex.encodeHexString(md.digest());
} catch (NoSuchAlgorithmException e) {
return "";
}
}
private static String initMockEnv(ConnectContext connectContext, QueryDumpInfo replayDumpInfo) throws Exception {
// mock statistics table
StarRocksAssert starRocksAssert = new StarRocksAssert(connectContext);
if (!starRocksAssert.databaseExist("_statistics_")) {
starRocksAssert.withDatabaseWithoutAnalyze(Constants.StatisticsDBName)
.useDatabase(Constants.StatisticsDBName);
starRocksAssert.withTable(createStatisticsTableStmt);
}
// prepare dump mock environment
// statement
String replaySql = replayDumpInfo.getOriginStmt();
// session variable
connectContext.setSessionVariable(replayDumpInfo.getSessionVariable());
// create table
int backendId = 10002;
int backendIdSize = GlobalStateMgr.getCurrentSystemInfo().getBackendIds(true).size();
for (int i = 1; i < backendIdSize; ++i) {
UtFrameUtils.dropMockBackend(backendId++);
}
Set<String> dbSet = replayDumpInfo.getCreateTableStmtMap().keySet().stream().map(key -> key.split("\\.")[0])
.collect(Collectors.toSet());
dbSet.forEach(db -> {
if (starRocksAssert.databaseExist(db)) {
try {
starRocksAssert.dropDatabase(db);
} catch (Exception e) {
e.printStackTrace();
}
}
});
for (Map.Entry<String, String> entry : replayDumpInfo.getCreateTableStmtMap().entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
if (!starRocksAssert.databaseExist(dbName)) {
starRocksAssert.withDatabase(dbName);
}
starRocksAssert.useDatabase(dbName);
starRocksAssert.withTable(entry.getValue());
}
// create view
for (Map.Entry<String, String> entry : replayDumpInfo.getCreateViewStmtMap().entrySet()) {
String createView = "create view " + entry.getKey() + " as " + entry.getValue();
starRocksAssert.withView(createView);
}
// mock be num
backendId = 10002;
for (int i = 1; i < replayDumpInfo.getBeNum(); ++i) {
UtFrameUtils.addMockBackend(backendId++);
}
// mock table row count
for (Map.Entry<String, Map<String, Long>> entry : replayDumpInfo.getPartitionRowCountMap().entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
OlapTable replayTable = (OlapTable) connectContext.getGlobalStateMgr().getDb("default_cluster:" + dbName)
.getTable(entry.getKey().split("\\.")[1]);
for (Map.Entry<String, Long> partitionEntry : entry.getValue().entrySet()) {
setPartitionStatistics(replayTable, partitionEntry.getKey(), partitionEntry.getValue());
}
}
// mock table column statistics
for (Map.Entry<String, Map<String, ColumnStatistic>> entry : replayDumpInfo.getTableStatisticsMap()
.entrySet()) {
String dbName = entry.getKey().split("\\.")[0];
OlapTable replayTable = (OlapTable) connectContext.getGlobalStateMgr().getDb("default_cluster:" + dbName)
.getTable(entry.getKey().split("\\.")[1]);
for (Map.Entry<String, ColumnStatistic> columnStatisticEntry : entry.getValue().entrySet()) {
GlobalStateMgr.getCurrentStatisticStorage()
.addColumnStatistic(replayTable, columnStatisticEntry.getKey(),
columnStatisticEntry.getValue());
}
}
return replaySql;
}
private static void tearMockEnv() {
int backendId = 10002;
int backendIdSize = GlobalStateMgr.getCurrentSystemInfo().getBackendIds(true).size();
for (int i = 1; i < backendIdSize; ++i) {
try {
UtFrameUtils.dropMockBackend(backendId++);
} catch (DdlException e) {
e.printStackTrace();
}
}
}
private static Pair<String, ExecPlan> getQueryExecPlan(QueryStatement statement, ConnectContext connectContext) {
ColumnRefFactory columnRefFactory = new ColumnRefFactory();
LogicalPlan logicalPlan = new RelationTransformer(columnRefFactory, connectContext)
.transform((statement).getQueryRelation());
Optimizer optimizer = new Optimizer();
OptExpression optimizedPlan = optimizer.optimize(
connectContext,
logicalPlan.getRoot(),
new PhysicalPropertySet(),
new ColumnRefSet(logicalPlan.getOutputColumn()),
columnRefFactory);
ExecPlan execPlan = new PlanFragmentBuilder()
.createPhysicalPlan(optimizedPlan, connectContext,
logicalPlan.getOutputColumn(), columnRefFactory, new ArrayList<>());
OperatorStrings operatorPrinter = new OperatorStrings();
return new Pair<>(operatorPrinter.printOperator(optimizedPlan), execPlan);
}
private static Pair<String, ExecPlan> getInsertExecPlan(InsertStmt statement, ConnectContext connectContext) {
ExecPlan execPlan = new InsertPlanner().plan(statement, connectContext);
OperatorStrings operatorPrinter = new OperatorStrings();
return new Pair<>(operatorPrinter.printOperator(execPlan.getPhysicalPlan()), execPlan);
}
public static Pair<String, ExecPlan> getNewPlanAndFragmentFromDump(ConnectContext connectContext,
QueryDumpInfo replayDumpInfo) throws Exception {
String replaySql = initMockEnv(connectContext, replayDumpInfo);
Map<String, Database> dbs = null;
try {
StatementBase statementBase = com.starrocks.sql.parser.SqlParser.parse(replaySql,
connectContext.getSessionVariable().getSqlMode()).get(0);
com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, connectContext);
dbs = AnalyzerUtils.collectAllDatabase(connectContext, statementBase);
lock(dbs);
if (statementBase instanceof QueryStatement) {
return getQueryExecPlan((QueryStatement) statementBase, connectContext);
} else if (statementBase instanceof InsertStmt) {
return getInsertExecPlan((InsertStmt) statementBase, connectContext);
} else {
Preconditions.checkState(false, "Do not support the statement");
return null;
}
} finally {
unLock(dbs);
tearMockEnv();
}
}
private static String getThriftString(List<PlanFragment> fragments) {
StringBuilder str = new StringBuilder();
for (int i = 0; i < fragments.size(); ++i) {
if (i > 0) {
// a blank line between plan fragments
str.append("\n");
}
str.append(fragments.get(i).toThrift());
}
return str.toString();
}
public static String getFragmentPlan(ConnectContext connectContext, String sql) throws Exception {
return getPlanAndFragment(connectContext, sql).second.getExplainString(TExplainLevel.NORMAL);
}
public static String getVerboseFragmentPlan(ConnectContext connectContext, String sql) throws Exception {
return getPlanAndFragment(connectContext, sql).second.getExplainString(TExplainLevel.VERBOSE);
}
public static String getPlanThriftString(ConnectContext ctx, String queryStr) throws Exception {
return UtFrameUtils.getThriftString(UtFrameUtils.getPlanAndFragment(ctx, queryStr).second.getFragments());
}
// Lock all database before analyze
private static void lock(Map<String, Database> dbs) {
if (dbs == null) {
return;
}
for (Database db : dbs.values()) {
db.readLock();
}
}
// unLock all database after analyze
private static void unLock(Map<String, Database> dbs) {
if (dbs == null) {
return;
}
for (Database db : dbs.values()) {
db.readUnlock();
}
}
}
|
[
"\"STARROCKS_HOME\"",
"\"STARROCKS_HOME\""
] |
[] |
[
"STARROCKS_HOME"
] |
[]
|
["STARROCKS_HOME"]
|
java
| 1 | 0 | |
train_parsingrcnn.py
|
import os
import sys
from time import time
sys.path.insert(0, os.getcwd())
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
from configs.vip import ParsingRCNNModelConfig
from configs.vip import VIPDataset
# from models.parsing_rcnn_model import PARSING_RCNN
from models.parsing_rcnn_model_dilated import PARSING_RCNN
class trainConfig(ParsingRCNNModelConfig):
# NAME = "vip_singleframe_20190408a"
NAME = "vip_singleframe_test"
GPU_COUNT = 1
IMAGES_PER_GPU = 4
STEPS_PER_EPOCH = 2000
# STEPS_PER_EPOCH = 20
VALIDATION_STEPS = 100
# VALIDATION_STEPS = 10
SAVE_MODEL_PERIOD = 1
# Root directory of the project
ROOT_DIR = os.getcwd()
# Path to trained weights file
PRETRAIN_MODEL_PATH = os.path.join(ROOT_DIR, "checkpoints", "parsing_rcnn.h5")
# PRETRAIN_MODEL_PATH = "/home/sk49/workspace/zhoudu/ATEN/outputs/vip_singleframe_20190326a/checkpoints/" \
# "parsing_rcnn_vip_singleframe_20190326a_epoch038_loss0.491_valloss0.550.h5"
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = "./outputs"
# DEFAULT_DATASET_DIR = "/home/sk49/workspace/dataset/VIP"
DEFAULT_DATASET_DIR = "D:\dataset\VIP_tiny"
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
t0 = time()
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on Pascal Person Part.')
parser.add_argument('--dataset', required=False,
default=DEFAULT_DATASET_DIR,
metavar="/path/to/dataset/",
help='Directory of the dataset')
parser.add_argument('--model', required=False,
default="pretrain",
metavar="/path/to/weights.h5",
help="Path to weights .h5 file")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
args = parser.parse_args()
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
config = trainConfig()
config.display()
# Create model
model = PARSING_RCNN(mode="training", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()[1]
elif args.model.lower() == "pretrain":
model_path = PRETRAIN_MODEL_PATH
else:
model_path = args.model
# common load weight
print("Loading weights ", model_path)
t1 = time()
model.load_weights(model_path, by_name=True)
print("Loaded weights ", time() - t1, "s")
# Training dataset. Use the training set and 35K from the
# validation set, as in the Mask R-CNN paper.
# dataset_train = VIPDataset()
# dataset_train.load_vip(args.dataset, "trainval")
# dataset_train.prepare()
dataset_train = VIPDataset()
dataset_train.load_vip(args.dataset, "traintiny")
dataset_train.prepare()
# Validation dataset
# dataset_val = VIPDataset()
# dataset_val.load_vip(args.dataset, "val")
# dataset_val.prepare()
dataset_val = VIPDataset()
dataset_val.load_vip(args.dataset, "traintiny")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Fine tune all layers
model.train(dataset_train, dataset_val,
learning_rate=0.001,
epochs=200,
layers='all',
period=config.SAVE_MODEL_PERIOD)
# model.train(dataset_train, dataset_val,
# learning_rate=0.0001,
# epochs=150,
# layers='all',
# period=config.SAVE_MODEL_PERIOD)
print("total", (time() - t0), "s")
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'mail.messagingengine.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
YNITIUM_MAIL_SUBJECT_PREFIX = '[Ynitium]'
YNITIUM_MAIL_SENDER = 'Ynitium Admin <[email protected]>'
YNITIUM_ADMIN = os.environ.get('YNITIUM_ADMIN')
YNITIUM_POSTS_PER_PAGE = 15
YNITIUM_FOLLOWERS_PER_PAGE = 50
YNITIUM_COMMENTS_PER_PAGE = 30
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# email errors to the administrators
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.YNITIUM_MAIL_SENDER,
toaddrs=[cls.YNITIUM_ADMIN],
subject=cls.YNITIUM_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuConfig,
'default': DevelopmentConfig
}
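# Sketch of how an app factory might consume this mapping (FLASK_CONFIG is an
# assumed environment variable, not defined in this module):
#   from flask import Flask
#   app = Flask(__name__)
#   name = os.environ.get('FLASK_CONFIG', 'default')
#   app.config.from_object(config[name])
#   config[name].init_app(app)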
|
[] |
[] |
[
"YNITIUM_ADMIN",
"MAIL_PASSWORD",
"DEV_DATABASE_URL",
"DATABASE_URL",
"SECRET_KEY",
"MAIL_USERNAME",
"SSL_DISABLE",
"TEST_DATABASE_URL"
] |
[]
|
["YNITIUM_ADMIN", "MAIL_PASSWORD", "DEV_DATABASE_URL", "DATABASE_URL", "SECRET_KEY", "MAIL_USERNAME", "SSL_DISABLE", "TEST_DATABASE_URL"]
|
python
| 8 | 0 | |
bin/gandalf.go
|
// Copyright 2015 gandalf authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"errors"
"fmt"
"io"
"log/syslog"
"os"
"os/exec"
"path"
"regexp"
"strings"
"github.com/tsuru/config"
"github.com/tsuru/gandalf/db"
"github.com/tsuru/gandalf/repository"
"github.com/tsuru/gandalf/user"
"gopkg.in/mgo.v2/bson"
)
var log *syslog.Writer
func hasWritePermission(u *user.User, r *repository.Repository) (allowed bool) {
for _, userName := range r.Users {
if u.Name == userName {
return true
}
}
return false
}
func hasReadPermission(u *user.User, r *repository.Repository) (allowed bool) {
if r.IsPublic {
return true
}
for _, userName := range r.Users {
if u.Name == userName {
return true
}
}
for _, userName := range r.ReadOnlyUsers {
if u.Name == userName {
return true
}
}
return false
}
// Returns the command being executed by ssh.
// When a user runs `$ git push` from his/her machine, the server
// receives an SSH command, identified by this user (by the SSH key).
// The command and its parameters are available through the SSH_ORIGINAL_COMMAND
// environment variable. In the git push example, it would have the following value:
// SSH_ORIGINAL_COMMAND=git-receive-pack 'foo.git'
// This function is responsible for retrieving the `git-receive-pack` part of SSH_ORIGINAL_COMMAND
func action() string {
return strings.Split(os.Getenv("SSH_ORIGINAL_COMMAND"), " ")[0]
}
// Get the repository name requested in SSH_ORIGINAL_COMMAND and retrieves
// the related document on the database and returns it.
// This function does two distinct things, parses the SSH_ORIGINAL_COMMAND and
// returns a "validation" error if it doesn't matches the expected format
// and gets the repository from the database based on the info
// obtained by the SSH_ORIGINAL_COMMAND parse.
func requestedRepository() (repository.Repository, error) {
_, repoName, err := parseGitCommand()
if err != nil {
return repository.Repository{}, err
}
var repo repository.Repository
conn, err := db.Conn()
if err != nil {
return repository.Repository{}, err
}
defer conn.Close()
if err := conn.Repository().Find(bson.M{"_id": repoName}).One(&repo); err != nil {
return repository.Repository{}, errors.New("Repository not found")
}
return repo, nil
}
// Checks whether a command is a valid git command
// The following format is allowed:
// (git-[a-z-]+) '/?([\w-+@][\w-+.@]*/)?([\w-]+)\.git'
func parseGitCommand() (command, name string, err error) {
// The following regex validates the git command, which is in the form:
// <git-command> [<namespace>/]<name>
// with namespace being optional. If a namespace is used, we validate it
// according to the following:
// - a namespace is optional
// - a namespace contains only alphanumerics, underscores, at signs (@),
// hyphens (-), plus signs (+) and periods, but it does not start with a
// period (.)
// - one and exactly one slash (/) separates the namespace from the actual name
r, err := regexp.Compile(`(git-[a-z-]+) '/?([\w-+@][\w-+.@]*/)?([\w-]+)\.git'`)
if err != nil {
panic(err)
}
m := r.FindStringSubmatch(os.Getenv("SSH_ORIGINAL_COMMAND"))
if len(m) != 4 {
return "", "", errors.New("You've tried to execute some weird command, I'm deliberately denying you to do that, get over it.")
}
return m[1], m[2] + m[3], nil
}
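// For example, SSH_ORIGINAL_COMMAND="git-receive-pack 'myteam/app.git'"
// (values illustrative) parses to command "git-receive-pack" and
// name "myteam/app".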
// Executes the SSH_ORIGINAL_COMMAND based on the condition
// defined by the `f` parameter.
// Also receives a custom error message to print to the end user and a
// stdout object, to which the SSH_ORIGINAL_COMMAND output is written.
func executeAction(f func(*user.User, *repository.Repository) bool, errMsg string, stdout io.Writer) {
var u user.User
conn, err := db.Conn()
if err != nil {
return
}
defer conn.Close()
if err := conn.User().Find(bson.M{"_id": os.Args[1]}).One(&u); err != nil {
log.Err("Error obtaining user. Gandalf database is probably in an inconsistent state.")
fmt.Fprintln(os.Stderr, "Error obtaining user. Gandalf database is probably in an inconsistent state.")
return
}
repo, err := requestedRepository()
if err != nil {
log.Err(err.Error())
fmt.Fprintln(os.Stderr, err.Error())
return
}
if f(&u, &repo) {
// split into a function (maybe executeCmd)
c, err := formatCommand()
if err != nil {
log.Err(err.Error())
fmt.Fprintln(os.Stderr, err.Error())
// c is empty when formatCommand fails; bail out instead of panicking below
return
}
log.Info("Executing " + strings.Join(c, " "))
cmd := exec.Command(c[0], c[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = stdout
baseEnv := os.Environ()
baseEnv = append(baseEnv, "TSURU_USER="+u.Name)
cmd.Env = baseEnv
stderr := &bytes.Buffer{}
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
log.Err("Got error while executing original command: " + err.Error())
log.Err(stderr.String())
fmt.Fprintln(os.Stderr, "Got error while executing original command: "+err.Error())
fmt.Fprintln(os.Stderr, stderr.String())
}
return
}
log.Err("Permission denied.")
log.Err(errMsg)
fmt.Fprintln(os.Stderr, "Permission denied.")
fmt.Fprintln(os.Stderr, errMsg)
}
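// formatCommand rewrites the repository argument in SSH_ORIGINAL_COMMAND to the
// repository's bare path on disk. For example (hypothetical config), with
// git:bare:location set to /var/lib/gandalf and SSH_ORIGINAL_COMMAND=git-upload-pack 'myapp.git',
// it returns ["git-upload-pack", "/var/lib/gandalf/myapp.git"].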
func formatCommand() ([]string, error) {
p, err := config.GetString("git:bare:location")
if err != nil {
log.Err(err.Error())
return []string{}, err
}
_, repoName, err := parseGitCommand()
if err != nil {
log.Err(err.Error())
return []string{}, err
}
repoName += ".git"
cmdList := strings.Split(os.Getenv("SSH_ORIGINAL_COMMAND"), " ")
if len(cmdList) != 2 {
log.Err("Malformed git command")
return []string{}, fmt.Errorf("Malformed git command")
}
cmdList[1] = path.Join(p, repoName)
return cmdList, nil
}
func main() {
var err error
log, err = syslog.New(syslog.LOG_INFO, "gandalf-listener")
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
panic(err.Error())
}
err = config.ReadConfigFile("/etc/gandalf.conf")
if err != nil {
log.Err(err.Error())
fmt.Fprintln(os.Stderr, err.Error())
return
}
_, _, err = parseGitCommand()
if err != nil {
log.Err(err.Error())
fmt.Fprintln(os.Stderr, err.Error())
return
}
a := action()
if a == "git-receive-pack" {
executeAction(hasWritePermission, "You don't have access to write in this repository.", os.Stdout)
return
}
if a == "git-upload-pack" {
executeAction(hasReadPermission, "You don't have access to read this repository.", os.Stdout)
return
}
}
|
[
"\"SSH_ORIGINAL_COMMAND\"",
"\"SSH_ORIGINAL_COMMAND\"",
"\"SSH_ORIGINAL_COMMAND\""
] |
[] |
[
"SSH_ORIGINAL_COMMAND"
] |
[]
|
["SSH_ORIGINAL_COMMAND"]
|
go
| 1 | 0 | |
localstack/services/awslambda/lambda_api.py
|
import re
import os
import imp
import sys
import json
import uuid
import time
import base64
import logging
import threading
import traceback
import hashlib
import functools
from io import BytesIO
from datetime import datetime
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from flask import Flask, Response, jsonify, request
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.utils.aws import aws_stack, aws_responses
from localstack.services.awslambda import lambda_executors
from localstack.services.awslambda.lambda_executors import (
LAMBDA_RUNTIME_PYTHON27,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610,
LAMBDA_RUNTIME_NODEJS810,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_RUBY,
LAMBDA_RUNTIME_RUBY25,
LAMBDA_RUNTIME_PROVIDED)
from localstack.utils.common import (to_str, load_file, save_file, TMP_FILES, ensure_readable,
mkdir, unzip, is_zip_file, zip_contains_jar_entries, run, short_uid,
timestamp_millis, parse_chunked_data, now_utc, safe_requests, FuncThread,
isoformat_milliseconds)
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
APP_NAME = 'lambda_api'
PATH_ROOT = '/2015-03-31'
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
# List of Lambda runtime names. Keep them in this list, mainly to silence the linter
LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_PYTHON27, LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610, LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_RUBY, LAMBDA_RUNTIME_RUBY25]
# default timeout in seconds
LAMBDA_DEFAULT_TIMEOUT = 3
# default handler and runtime
LAMBDA_DEFAULT_HANDLER = 'handler.handler'
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON36
LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
DEFAULT_BATCH_SIZE = 10
app = Flask(APP_NAME)
# map ARN strings to lambda function objects
arn_to_lambda = {}
# list of event source mappings for the API
event_source_mappings = []
# logger
LOG = logging.getLogger(__name__)
# mutex for access to CWD and ENV
exec_mutex = threading.Semaphore(1)
# whether to use Docker for execution
DO_USE_DOCKER = None
# start characters indicating that a lambda result should be parsed as JSON
JSON_START_CHAR_MAP = {
list: ('[',),
tuple: ('[',),
dict: ('{',),
str: ('"',),
bytes: ('"',),
bool: ('t', 'f'),
type(None): ('n',),
int: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'),
float: ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
}
POSSIBLE_JSON_TYPES = (str, bytes)
JSON_START_TYPES = tuple(set(JSON_START_CHAR_MAP.keys()) - set(POSSIBLE_JSON_TYPES))
JSON_START_CHARS = tuple(set(functools.reduce(lambda x, y: x + y, JSON_START_CHAR_MAP.values())))
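# For example, a result payload of '{"status": "ok"}' starts with '{' (a JSON start
# char), so it is parsed as JSON when the invocation response is built further below;
# a payload like 'plain text' is returned verbatim with Content-Type text/plain.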
# SQS listener thread settings
SQS_LISTENER_THREAD = {}
SQS_POLL_INTERVAL_SEC = 1
# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)
# IAM policy constants
IAM_POLICY_VERSION = '2012-10-17'
POLICY_NAME_PATTERN = 'lambda_policy_%s'
# Marker name to indicate that a bucket represents the local file system. This is used for testing
# Serverless applications where we mount the Lambda code directly into the container from the host OS.
BUCKET_MARKER_LOCAL = '__local__'
class ClientError(Exception):
def __init__(self, msg, code=400):
super(ClientError, self).__init__(msg)
self.code = code
self.msg = msg
def get_response(self):
if isinstance(self.msg, Response):
return self.msg
return error_response(self.msg, self.code)
class LambdaContext(object):
def __init__(self, func_details, qualifier=None):
self.function_name = func_details.name()
self.function_version = func_details.get_qualifier_version(qualifier)
self.invoked_function_arn = func_details.arn()
if qualifier:
self.invoked_function_arn += ':' + qualifier
def get_remaining_time_in_millis(self):
# TODO implement!
return 1000 * 60
def cleanup():
global event_source_mappings, arn_to_lambda
arn_to_lambda = {}
event_source_mappings = []
LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name):
return aws_stack.lambda_function_arn(function_name)
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
arn = func_arn(lambda_name)
arn_to_lambda[arn].versions.get('$LATEST')['Function'] = lambda_handler
arn_to_lambda[arn].cwd = lambda_cwd
def add_event_source(function_name, source_arn, enabled, batch_size=None):
batch_size = batch_size or DEFAULT_BATCH_SIZE
mapping = {
'UUID': str(uuid.uuid4()),
'StateTransitionReason': 'User action',
'LastModified': float(time.mktime(datetime.utcnow().timetuple())),
'BatchSize': batch_size,
'State': 'Enabled' if enabled is True or enabled is None else 'Disabled',
'FunctionArn': func_arn(function_name),
'EventSourceArn': source_arn,
'LastProcessingResult': 'OK',
'StartingPosition': LAMBDA_DEFAULT_STARTING_POSITION
}
event_source_mappings.append(mapping)
return mapping
def update_event_source(uuid_value, function_name, enabled, batch_size):
for m in event_source_mappings:
if uuid_value == m['UUID']:
if function_name:
m['FunctionArn'] = func_arn(function_name)
m['BatchSize'] = batch_size
m['State'] = 'Enabled' if enabled is True else 'Disabled'
m['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
return m
return {}
def delete_event_source(uuid_value):
for i, m in enumerate(event_source_mappings):
if uuid_value == m['UUID']:
return event_source_mappings.pop(i)
return {}
def use_docker():
global DO_USE_DOCKER
if DO_USE_DOCKER is None:
DO_USE_DOCKER = False
if 'docker' in config.LAMBDA_EXECUTOR:
try:
run('docker images', print_error=False)
DO_USE_DOCKER = True
except Exception:
pass
return DO_USE_DOCKER
def process_apigateway_invocation(func_arn, path, payload, headers={},
resource_path=None, method=None, path_params={},
query_string_params={}, request_context={}):
try:
resource_path = resource_path or path
event = {
'path': path,
'headers': dict(headers),
'pathParameters': dict(path_params),
'body': payload,
'isBase64Encoded': False,
'resource': resource_path,
'httpMethod': method,
'queryStringParameters': query_string_params,
'requestContext': request_context,
'stageVariables': {} # TODO
}
return run_lambda(event=event, context={}, func_arn=func_arn)
except Exception as e:
LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def process_sns_notification(func_arn, topic_arn, subscriptionArn, message, message_attributes, subject='',):
event = {
'Records': [{
'EventSource': 'localstack:sns',
'EventVersion': '1.0',
'EventSubscriptionArn': subscriptionArn,
'Sns': {
'Type': 'Notification',
'TopicArn': topic_arn,
'Subject': subject,
'Message': message,
'Timestamp': timestamp_millis(),
'MessageAttributes': message_attributes
}
}]
}
return run_lambda(event=event, context={}, func_arn=func_arn, asynchronous=True)
def process_kinesis_records(records, stream_name):
def chunks(lst, n):
# Yield successive n-sized chunks from lst.
for i in range(0, len(lst), n):
yield lst[i:i + n]
# feed records into listening lambdas
try:
stream_arn = aws_stack.kinesis_stream_arn(stream_name)
sources = get_event_sources(source_arn=stream_arn)
for source in sources:
arn = source['FunctionArn']
for chunk in chunks(records, source['BatchSize']):
event = {
'Records': [
{
'eventID': 'shardId-000000000000:{0}'.format(rec['sequenceNumber']),
'eventSourceARN': stream_arn,
'kinesis': rec
}
for rec in chunk
]
}
run_lambda(event=event, context={}, func_arn=arn)
except Exception as e:
LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def start_lambda_sqs_listener():
if SQS_LISTENER_THREAD:
return
def send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region):
records = []
for msg in messages:
records.append({
'body': msg['Body'],
'receiptHandle': msg['ReceiptHandle'],
'md5OfBody': msg['MD5OfBody'],
'eventSourceARN': queue_arn,
'eventSource': lambda_executors.EVENT_SOURCE_SQS,
'awsRegion': region,
'messageId': msg['MessageId'],
'attributes': msg.get('Attributes', {}),
'messageAttributes': msg.get('MessageAttributes', {}),
'md5OfMessageAttributes': msg.get('MD5OfMessageAttributes'),
'sqs': True,
})
event = {'Records': records}
def delete_messages(result, func_arn, event, error=None, dlq_sent=None, **kwargs):
if error and not dlq_sent:
# Skip deleting messages from the queue in case of processing errors AND if
# the message has not yet been sent to a dead letter queue (DLQ).
# We'll pick them up and retry next time they become available on the queue.
return
sqs_client = aws_stack.connect_to_service('sqs')
entries = [{'Id': r['receiptHandle'], 'ReceiptHandle': r['receiptHandle']} for r in records]
sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=entries)
# TODO implement retries, based on "RedrivePolicy.maxReceiveCount" in the queue settings
run_lambda(event=event, context={}, func_arn=lambda_arn, asynchronous=True, callback=delete_messages)
def listener_loop(*args):
while True:
try:
sources = get_event_sources(source_arn=r'.*:sqs:.*')
if not sources:
# Temporarily disable polling if no event sources are configured
# anymore. The loop will get restarted next time a message
# arrives and if an event source is configured.
SQS_LISTENER_THREAD.pop('_thread_')
return
sqs_client = aws_stack.connect_to_service('sqs')
for source in sources:
try:
queue_arn = source['EventSourceArn']
lambda_arn = source['FunctionArn']
region_name = queue_arn.split(':')[3]
queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
result = sqs_client.receive_message(QueueUrl=queue_url)
messages = result.get('Messages')
if not messages:
continue
send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region=region_name)
except Exception as e:
LOG.debug('Unable to poll SQS messages for queue %s: %s' % (queue_arn, e))
except Exception:
pass
finally:
time.sleep(SQS_POLL_INTERVAL_SEC)
LOG.debug('Starting SQS message polling thread for Lambda API')
SQS_LISTENER_THREAD['_thread_'] = FuncThread(listener_loop)
SQS_LISTENER_THREAD['_thread_'].start()
def process_sqs_message(queue_name, message_body, message_attributes, region_name=None):
# feed message into the first listening lambda (message should only get processed once)
try:
region_name = region_name or aws_stack.get_region()
queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
sources = get_event_sources(source_arn=queue_arn)
arns = [s.get('FunctionArn') for s in sources]
LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
source = (sources or [None])[0]
if not source:
return False
start_lambda_sqs_listener()
return True
except Exception as e:
LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
result = []
for m in event_source_mappings:
if not func_name or (m['FunctionArn'] in [func_name, func_arn(func_name)]):
if _arn_match(mapped=m['EventSourceArn'], searched=source_arn):
result.append(m)
return result
def _arn_match(mapped, searched):
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
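# Example (hypothetical ARNs): mapped='arn:aws:dynamodb:us-east-1:000000000000:table/t1/stream/2020-01-01'
# matches searched='arn:aws:dynamodb:us-east-1:000000000000:table/t1', because the
# remaining suffix '/stream/2020-01-01' starts with '/'.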
if re.match(r'^%s$' % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched):]
return suffix[0] == '/'
return False
def get_function_version(arn, version):
func = arn_to_lambda.get(arn)
return format_func_details(func, version=version, always_add_version=True)
def publish_new_function_version(arn):
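# Publishing copies the $LATEST metadata into the next numeric version: e.g.
# (hypothetical state) if versions currently holds {'$LATEST', '1'}, a new
# entry '2' is created from $LATEST's CodeSize/CodeSha256/Function.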
func_details = arn_to_lambda.get(arn)
versions = func_details.versions
last_version = func_details.max_version()
versions[str(last_version + 1)] = {
'CodeSize': versions.get('$LATEST').get('CodeSize'),
'CodeSha256': versions.get('$LATEST').get('CodeSha256'),
'Function': versions.get('$LATEST').get('Function'),
'RevisionId': str(uuid.uuid4())
}
return get_function_version(arn, str(last_version + 1))
def do_list_versions(arn):
return sorted([get_function_version(arn, version) for version in
arn_to_lambda.get(arn).versions.keys()], key=lambda k: str(k.get('Version')))
def do_update_alias(arn, alias, version, description=None):
new_alias = {
'AliasArn': arn + ':' + alias,
'FunctionVersion': version,
'Name': alias,
'Description': description or '',
'RevisionId': str(uuid.uuid4())
}
arn_to_lambda.get(arn).aliases[alias] = new_alias
return new_alias
@cloudwatched('lambda')
def run_lambda(event, context, func_arn, version=None, suppress_output=False,
asynchronous=False, callback=None):
if suppress_output:
stdout_ = sys.stdout
stderr_ = sys.stderr
stream = StringIO()
sys.stdout = stream
sys.stderr = stream
try:
func_arn = aws_stack.fix_arn(func_arn)
func_details = arn_to_lambda.get(func_arn)
if not func_details:
return not_found_error(msg='The resource specified in the request does not exist.')
if not context:
context = LambdaContext(func_details, version)
result = LAMBDA_EXECUTOR.execute(func_arn, func_details, event, context=context,
version=version, asynchronous=asynchronous, callback=callback)
except Exception as e:
return error_response('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
finally:
if suppress_output:
sys.stdout = stdout_
sys.stderr = stderr_
return result
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
if lambda_cwd or lambda_env:
exec_mutex.acquire()
if lambda_cwd:
previous_cwd = os.getcwd()
os.chdir(lambda_cwd)
sys.path = [lambda_cwd] + sys.path
if lambda_env:
previous_env = dict(os.environ)
os.environ.update(lambda_env)
# generate lambda file name
lambda_id = 'l_%s' % short_uid()
lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
save_file(lambda_file, script)
# delete temporary .py and .pyc files on exit
TMP_FILES.append(lambda_file)
TMP_FILES.append('%sc' % lambda_file)
try:
pre_sys_modules_keys = set(sys.modules.keys())
try:
handler_module = imp.load_source(lambda_id, lambda_file)
module_vars = handler_module.__dict__
finally:
# the above import can bring modules for the function
# (e.g. settings.py) into the global namespace. Subsequent
# calls could then pick up modules from another function,
# causing hard-to-debug issues.
post_sys_modules_keys = set(sys.modules.keys())
for key in post_sys_modules_keys:
if key not in pre_sys_modules_keys:
sys.modules.pop(key)
except Exception as e:
LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
raise e
finally:
if lambda_cwd or lambda_env:
if lambda_cwd:
os.chdir(previous_cwd)
sys.path.pop(0)
if lambda_env:
os.environ = previous_env
exec_mutex.release()
return module_vars[handler_function]
def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
# TODO: support Java Lambdas in the future
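# Examples: handler 'index.handler' with a nodejs runtime maps to 'index.js';
# 'dir.module.handler' with a python runtime maps to 'dir/module.py' (on POSIX).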
if runtime.startswith(LAMBDA_RUNTIME_PROVIDED):
return 'bootstrap'
delimiter = '.'
if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
file_ext = '.js'
elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
file_ext = ''
elif runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE2) or runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE21):
file_ext = '.dll'
delimiter = ':'
elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
file_ext = '.rb'
else:
handler_name = handler_name.rpartition(delimiter)[0].replace(delimiter, os.path.sep)
file_ext = '.py'
return '%s%s' % (handler_name.split(delimiter)[0], file_ext)
def get_handler_function_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
# TODO: support Java Lambdas in the future
if runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE2) or runtime.startswith(LAMBDA_RUNTIME_DOTNETCORE21):
return handler_name.split(':')[-1]
else:
return handler_name.split('.')[-1]
def error_response(msg, code=500, error_type='InternalFailure'):
LOG.warning(msg)
return aws_responses.flask_error_response(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
"""Returns the ZIP file contents from a FunctionCode dict.
:type function_code: dict
:param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
:returns: bytes of the Zip file.
"""
if 'S3Bucket' in function_code:
s3_client = aws_stack.connect_to_service('s3')
bytes_io = BytesIO()
try:
s3_client.download_fileobj(function_code['S3Bucket'], function_code['S3Key'], bytes_io)
zip_file_content = bytes_io.getvalue()
except Exception as e:
raise ClientError('Unable to fetch Lambda archive from S3: %s' % e, 404)
elif 'ZipFile' in function_code:
zip_file_content = function_code['ZipFile']
zip_file_content = base64.b64decode(zip_file_content)
else:
raise ClientError('No valid Lambda archive specified.')
return zip_file_content
def get_java_handler(zip_file_content, main_file, func_details=None):
"""Creates a Java handler from an uploaded ZIP or JAR.
:type zip_file_content: bytes
:param zip_file_content: ZIP file bytes.
:type func_details: LambdaFunction
:param func_details: The Lambda function details, passed through to the Java executor.
:type main_file: str
:param main_file: Filepath to the uploaded ZIP or JAR file.
:returns: function or flask.Response
"""
if is_zip_file(zip_file_content):
def execute(event, context):
result = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
event, context, main_file=main_file, func_details=func_details)
return result
return execute
raise ClientError(error_response(
'Unable to extract Java Lambda handler - file is not a valid zip/jar file', 400, error_type='ValidationError'))
def set_archive_code(code, lambda_name, zip_file_content=None):
# get metadata
lambda_arn = func_arn(lambda_name)
lambda_details = arn_to_lambda[lambda_arn]
is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
if is_local_mount and config.LAMBDA_REMOTE_DOCKER:
msg = 'Please note that Lambda mounts (bucket name "%s") cannot be used with LAMBDA_REMOTE_DOCKER=1'
raise Exception(msg % BUCKET_MARKER_LOCAL)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(lambda_arn)
if is_local_mount:
# Mount or use a local folder lambda executors can reference
# WARNING: this means we're pointing lambda_cwd to a local path in the user's
# file system! We must ensure that there is no data loss (i.e., we must *not* add
# this folder to TMP_FILES or similar).
return code['S3Key']
# get file content
zip_file_content = zip_file_content or get_zip_bytes(code)
# Save the zip file to a temporary file that the lambda executors can reference
code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
mkdir(tmp_dir)
tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
save_file(tmp_file, zip_file_content)
TMP_FILES.append(tmp_dir)
lambda_details.cwd = tmp_dir
return tmp_dir
def set_function_code(code, lambda_name, lambda_cwd=None):
def generic_handler(event, context):
raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)
arn = func_arn(lambda_name)
lambda_details = arn_to_lambda[arn]
runtime = lambda_details.runtime
lambda_environment = lambda_details.envvars
handler_name = lambda_details.handler = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
code_passed = code
code = code or lambda_details.code
is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
zip_file_content = None
if code_passed:
lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
if not is_local_mount:
# Get the zip file content for inspection and unpacking below
# (the archive itself was already saved by set_archive_code)
zip_file_content = get_zip_bytes(code_passed)
else:
lambda_cwd = lambda_cwd or lambda_details.cwd
# get local lambda working directory
tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
if not zip_file_content:
zip_file_content = load_file(tmp_file, mode='rb')
# Set the appropriate lambda handler.
lambda_handler = generic_handler
is_java = lambda_executors.is_java_lambda(runtime)
if is_java:
# The Lambda executors for Docker subclass LambdaExecutorContainers, which
# runs Lambda in Docker by passing all *.jar files in the function working
# directory as part of the classpath. Obtain a Java handler function below.
lambda_handler = get_java_handler(zip_file_content, tmp_file, func_details=lambda_details)
if not is_local_mount:
# Lambda code must be uploaded in Zip format
if not is_zip_file(zip_file_content):
raise ClientError(
'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
# Unzipping should only be required for (1) non-Java Lambdas, or (2) zip files containing JAR files
if not is_java or zip_contains_jar_entries(zip_file_content, 'lib/'):
unzip(tmp_file, lambda_cwd)
# Obtain handler details for any non-Java Lambda function
if not is_java:
handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
main_file = '%s/%s' % (lambda_cwd, handler_file)
if not os.path.exists(main_file):
# Raise an error if (1) this is not a local mount lambda, or (2) we're
# running Lambdas locally (not in Docker), or (3) we're using remote Docker.
# -> We do *not* want to raise an error if we're using local mount in non-remote Docker
if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
(is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
LOG.debug('Lambda archive content:\n%s' % file_list)
raise ClientError(error_response(
'Unable to find handler script (%s) in Lambda archive. %s' % (main_file, config_debug),
400, error_type='ValidationError'))
if runtime.startswith('python') and not use_docker():
try:
# make sure the file is actually readable, then read contents
ensure_readable(main_file)
zip_file_content = load_file(main_file, mode='rb')
# extract handler
lambda_handler = exec_lambda_code(
zip_file_content,
handler_function=handler_function,
lambda_cwd=lambda_cwd,
lambda_env=lambda_environment)
except Exception as e:
raise ClientError('Unable to get handler function from lambda code: %s' % e)
add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
return {'FunctionName': lambda_name}
def do_list_functions():
funcs = []
for f_arn, func in arn_to_lambda.items():
if type(func) != LambdaFunction:
continue
func_name = f_arn.split(':function:')[-1]
arn = func_arn(func_name)
func_details = arn_to_lambda.get(arn)
if not func_details:
# this can happen if we're accessing Lambdas from a different region (ARN mismatch)
continue
funcs.append(format_func_details(func_details))
return funcs
def format_func_details(func_details, version=None, always_add_version=False):
version = version or '$LATEST'
func_version = func_details.get_version(version)
result = {
'CodeSha256': func_version.get('CodeSha256'),
'Role': func_details.role,
'Version': version,
'FunctionArn': func_details.arn(),
'FunctionName': func_details.name(),
'CodeSize': func_version.get('CodeSize'),
'Handler': func_details.handler,
'Runtime': func_details.runtime,
'Timeout': func_details.timeout,
'Description': func_details.description,
'MemorySize': func_details.memory_size,
'LastModified': func_details.last_modified,
'TracingConfig': {'Mode': 'PassThrough'},
'RevisionId': func_version.get('RevisionId'),
'State': 'Active'
}
if func_details.envvars:
result['Environment'] = {
'Variables': func_details.envvars
}
if (always_add_version or version != '$LATEST') and len(result['FunctionArn'].split(':')) <= 7:
result['FunctionArn'] += ':%s' % version
return result
def forward_to_fallback_url(func_arn, data):
""" If LAMBDA_FALLBACK_URL is configured, forward the invocation of this non-existing
Lambda to the configured URL. """
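# Example (hypothetical config): LAMBDA_FALLBACK_URL='dynamodb://fallback-records'
# stores each payload as an item in the DynamoDB table 'fallback-records', while
# LAMBDA_FALLBACK_URL='http://localhost:8000/lambda' POSTs the payload to that URL.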
if not config.LAMBDA_FALLBACK_URL:
return None
if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'):
table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc
dynamodb = aws_stack.connect_to_service('dynamodb')
item = {
'id': {'S': short_uid()},
'timestamp': {'N': str(now_utc())},
'payload': {'S': str(data)}
}
aws_stack.create_dynamodb_table(table_name, partition_key='id')
dynamodb.put_item(TableName=table_name, Item=item)
return ''
if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL):
response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data)
return response.content
raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL)
def get_lambda_policy(function):
iam_client = aws_stack.connect_to_service('iam')
policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']
docs = []
for p in policies:
# !TODO: Cache policy documents instead of running N+1 API calls here!
versions = iam_client.list_policy_versions(PolicyArn=p['Arn'])['Versions']
default_version = [v for v in versions if v.get('IsDefaultVersion')]
versions = default_version or versions
doc = versions[0]['Document']
doc = doc if isinstance(doc, dict) else json.loads(doc)
if not isinstance(doc['Statement'], list):
doc['Statement'] = [doc['Statement']]
for stmt in doc['Statement']:
stmt['Principal'] = stmt.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID}
doc['PolicyArn'] = p['Arn']
doc['Id'] = 'default'
docs.append(doc)
policy = [d for d in docs if d['Statement'][0]['Resource'] == func_arn(function)]
return (policy or [None])[0]
def not_found_error(ref=None, msg=None):
if not msg:
msg = 'The resource you requested does not exist.'
if ref:
msg = '%s not found: %s' % ('Function' if ':function:' in ref else 'Resource', ref)
return error_response(msg, 404, error_type='ResourceNotFoundException')
# ------------
# API METHODS
# ------------
@app.before_request
def before_request():
# fix to enable chunked encoding, as this is used by some Lambda clients
transfer_encoding = request.headers.get('Transfer-Encoding', '').lower()
if transfer_encoding == 'chunked':
request.environ['wsgi.input_terminated'] = True
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
""" Create new function
---
operationId: 'createFunction'
parameters:
- name: 'request'
in: body
"""
arn = 'n/a'
try:
data = json.loads(to_str(request.data))
lambda_name = data['FunctionName']
event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
payload={'n': event_publisher.get_hash(lambda_name)})
arn = func_arn(lambda_name)
if arn in arn_to_lambda:
return error_response('Function already exists: %s' %
lambda_name, 409, error_type='ResourceConflictException')
arn_to_lambda[arn] = func_details = LambdaFunction(arn)
func_details.versions = {'$LATEST': {'RevisionId': str(uuid.uuid4())}}
func_details.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
func_details.description = data.get('Description', '')
func_details.handler = data['Handler']
func_details.runtime = data['Runtime']
func_details.envvars = data.get('Environment', {}).get('Variables', {})
func_details.tags = data.get('Tags', {})
func_details.timeout = data.get('Timeout', LAMBDA_DEFAULT_TIMEOUT)
func_details.role = data['Role']
func_details.memory_size = data.get('MemorySize')
func_details.code = data['Code']
func_details.set_dead_letter_config(data)
result = set_function_code(func_details.code, lambda_name)
if isinstance(result, Response):
del arn_to_lambda[arn]
return result
# remove content from code attribute, if present
func_details.code.pop('ZipFile', None)
# prepare result
result.update(format_func_details(func_details))
if data.get('Publish', False):
result['Version'] = publish_new_function_version(arn)['Version']
return jsonify(result or {})
except Exception as e:
arn_to_lambda.pop(arn, None)
if isinstance(e, ClientError):
return e.get_response()
return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
""" Get details for a single function
---
operationId: 'getFunction'
parameters:
- name: 'request'
in: body
- name: 'function'
in: path
"""
funcs = do_list_functions()
for func in funcs:
if func['FunctionName'] == function:
result = {
'Configuration': func,
'Code': {
'Location': '%s/code' % request.url
}
}
lambda_details = arn_to_lambda.get(func['FunctionArn'])
if lambda_details.concurrency is not None:
result['Concurrency'] = lambda_details.concurrency
return jsonify(result)
return not_found_error(func_arn(function))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
""" List functions
---
operationId: 'listFunctions'
parameters:
- name: 'request'
in: body
"""
funcs = do_list_functions()
result = {}
result['Functions'] = funcs
return jsonify(result)
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
""" Delete an existing function
---
operationId: 'deleteFunction'
parameters:
- name: 'request'
in: body
"""
arn = func_arn(function)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(arn)
try:
arn_to_lambda.pop(arn)
except KeyError:
return not_found_error(func_arn(function))
event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
payload={'n': event_publisher.get_hash(function)})
i = 0
while i < len(event_source_mappings):
mapping = event_source_mappings[i]
if mapping['FunctionArn'] == arn:
del event_source_mappings[i]
i -= 1
i += 1
result = {}
return jsonify(result)
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
""" Update the code of an existing function
---
operationId: 'updateFunctionCode'
parameters:
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
result = set_function_code(data, function)
# return early if set_function_code produced an error response, since
# calling dict.update() on a Response object would fail
if isinstance(result, Response):
return result
arn = func_arn(function)
func_details = arn_to_lambda.get(arn)
result.update(format_func_details(func_details))
return jsonify(result or {})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
""" Get the code of an existing function
---
operationId: 'getFunctionCode'
parameters:
"""
arn = func_arn(function)
lambda_cwd = arn_to_lambda[arn].cwd
tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
return Response(load_file(tmp_file, mode='rb'),
mimetype='application/zip',
headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['GET'])
def get_function_configuration(function):
""" Get the configuration of an existing function
---
operationId: 'getFunctionConfiguration'
parameters:
"""
arn = func_arn(function)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return not_found_error(arn)
result = format_func_details(lambda_details)
return jsonify(result)
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
""" Update the configuration of an existing function
---
operationId: 'updateFunctionConfiguration'
parameters:
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
arn = func_arn(function)
# Stop/remove any containers that this arn uses.
LAMBDA_EXECUTOR.cleanup(arn)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return error_response('Unable to find Lambda function ARN "%s"' % arn,
404, error_type='ResourceNotFoundException')
if data.get('Handler'):
lambda_details.handler = data['Handler']
if data.get('Runtime'):
lambda_details.runtime = data['Runtime']
lambda_details.set_dead_letter_config(data)
env_vars = data.get('Environment', {}).get('Variables')
if env_vars is not None:
lambda_details.envvars = env_vars
if data.get('Timeout'):
lambda_details.timeout = data['Timeout']
return jsonify(data)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['POST'])
def add_permission(function):
data = json.loads(to_str(request.data))
iam_client = aws_stack.connect_to_service('iam')
sid = data.get('StatementId')
policy = {
'Version': IAM_POLICY_VERSION,
'Id': 'LambdaFuncAccess-%s' % sid,
'Statement': [{
'Sid': sid,
'Effect': 'Allow',
# TODO: 'Principal' in policies not yet supported in upstream moto
# 'Principal': data.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID},
'Action': data.get('Action'),
'Resource': func_arn(function)
}]
}
iam_client.create_policy(PolicyName=POLICY_NAME_PATTERN % function,
PolicyDocument=json.dumps(policy), Description='Policy for Lambda function "%s"' % function)
result = {'Statement': sid}
return jsonify(result)
@app.route('%s/functions/<function>/policy/<statement>' % PATH_ROOT, methods=['DELETE'])
def remove_permission(function, statement):
qualifier = request.args.get('Qualifier')
iam_client = aws_stack.connect_to_service('iam')
policy = get_lambda_policy(function)
if not policy:
return error_response('Unable to find policy for Lambda function "%s"' % function,
404, error_type='ResourceNotFoundException')
iam_client.delete_policy(PolicyArn=policy['PolicyArn'])
result = {
'FunctionName': function,
'Qualifier': qualifier,
'StatementId': policy['Statement'][0]['Sid'],
}
return jsonify(result)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['GET'])
def get_policy(function):
policy = get_lambda_policy(function)
if not policy:
return error_response('The resource you requested does not exist.',
404, error_type='ResourceNotFoundException')
return jsonify({'Policy': json.dumps(policy), 'RevisionId': 'test1234'})
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
""" Invoke an existing function
---
operationId: 'invokeFunction'
parameters:
- name: 'request'
in: body
"""
# function here can either be an arn or a function name
arn = func_arn(function)
# arn can also contain a qualifier, extract it from there if so
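# e.g. (hypothetical) 'arn:aws:lambda:us-east-1:000000000000:function:f1:prod'
# yields qualifier='prod' and arn='arn:aws:lambda:us-east-1:000000000000:function:f1'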
m = re.match('(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?', arn)
if m and m.group(2):
qualifier = m.group(2)[1:]
arn = m.group(1)
else:
qualifier = request.args.get('Qualifier')
data = request.get_data()
if data:
data = to_str(data)
try:
data = json.loads(data)
except Exception:
try:
# try to read chunked content
data = json.loads(parse_chunked_data(data))
except Exception:
return error_response('The payload is not JSON: %s' % data, 415,
error_type='UnsupportedMediaTypeException')
# Default invocation type is RequestResponse
invocation_type = request.environ.get('HTTP_X_AMZ_INVOCATION_TYPE', 'RequestResponse')
def _create_response(result, status_code=200):
""" Create the final response for the given invocation result """
if isinstance(result, Response):
return result
details = {
'StatusCode': status_code,
'Payload': result,
'Headers': {}
}
if isinstance(result, dict):
for key in ('StatusCode', 'Payload', 'FunctionError'):
if result.get(key):
details[key] = result[key]
# Try to parse the payload as JSON
was_json = False
payload = details['Payload']
if payload and isinstance(payload, POSSIBLE_JSON_TYPES) and payload[0] in JSON_START_CHARS:
try:
details['Payload'] = json.loads(details['Payload'])
was_json = True
except Exception:
pass
# Set error headers
if details.get('FunctionError'):
details['Headers']['X-Amz-Function-Error'] = str(details['FunctionError'])
# Construct response object
response_obj = details['Payload']
if was_json or isinstance(response_obj, JSON_START_TYPES):
response_obj = jsonify(response_obj)
details['Headers']['Content-Type'] = 'application/json'
else:
response_obj = str(response_obj)
details['Headers']['Content-Type'] = 'text/plain'
return response_obj, details['StatusCode'], details['Headers']
# check if this lambda function exists
not_found = None
if arn not in arn_to_lambda:
not_found = not_found_error(arn)
elif qualifier and not arn_to_lambda.get(arn).qualifier_exists(qualifier):
not_found = not_found_error('{0}:{1}'.format(arn, qualifier))
if not_found:
forward_result = forward_to_fallback_url(arn, data)
if forward_result is not None:
return _create_response(forward_result)
return not_found
if invocation_type == 'RequestResponse':
result = run_lambda(asynchronous=False, func_arn=arn, event=data, context={}, version=qualifier)
return _create_response(result)
elif invocation_type == 'Event':
run_lambda(asynchronous=True, func_arn=arn, event=data, context={}, version=qualifier)
return _create_response('', status_code=202)
elif invocation_type == 'DryRun':
# Assume the dry run always passes.
return _create_response('', status_code=204)
return error_response('Invocation type not one of: RequestResponse, Event or DryRun',
code=400, error_type='InvalidParameterValueException')
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['GET'])
def list_event_source_mappings():
""" List event source mappings
---
operationId: 'listEventSourceMappings'
"""
event_source_arn = request.args.get('EventSourceArn')
function_name = request.args.get('FunctionName')
mappings = event_source_mappings
if event_source_arn:
mappings = [m for m in mappings if event_source_arn == m.get('EventSourceArn')]
if function_name:
function_arn = func_arn(function_name)
mappings = [m for m in mappings if function_arn == m.get('FunctionArn')]
response = {
'EventSourceMappings': mappings
}
return jsonify(response)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['GET'])
def get_event_source_mapping(mapping_uuid):
""" Get an existing event source mapping
---
operationId: 'getEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
mappings = event_source_mappings
mappings = [m for m in mappings if mapping_uuid == m.get('UUID')]
if len(mappings) == 0:
return not_found_error()
return jsonify(mappings[0])
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['POST'])
def create_event_source_mapping():
""" Create new event source mapping
---
operationId: 'createEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
data = json.loads(to_str(request.data))
mapping = add_event_source(
data['FunctionName'], data['EventSourceArn'], data.get('Enabled'), data.get('BatchSize')
)
return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
""" Update an existing event source mapping
---
operationId: 'updateEventSourceMapping'
parameters:
- name: 'request'
in: body
"""
data = json.loads(request.data)
if not mapping_uuid:
return jsonify({})
function_name = data.get('FunctionName') or ''
enabled = data.get('Enabled', True)
batch_size = data.get('BatchSize') or 100
mapping = update_event_source(mapping_uuid, function_name, enabled, batch_size)
return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
""" Delete an event source mapping
---
operationId: 'deleteEventSourceMapping'
"""
if not mapping_uuid:
return jsonify({})
mapping = delete_event_source(mapping_uuid)
return jsonify(mapping)
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['POST'])
def publish_version(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify(publish_new_function_version(arn))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['GET'])
def list_versions(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify({'Versions': do_list_versions(arn)})
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['POST'])
def create_alias(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
data = json.loads(request.data)
alias = data.get('Name')
if alias in arn_to_lambda.get(arn).aliases:
return error_response('Alias already exists: %s' % arn + ':' + alias, 409,
error_type='ResourceConflictException')
version = data.get('FunctionVersion')
description = data.get('Description')
return jsonify(do_update_alias(arn, alias, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['PUT'])
def update_alias(function, name):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
if name not in arn_to_lambda.get(arn).aliases:
return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
current_alias = arn_to_lambda.get(arn).aliases.get(name)
data = json.loads(request.data)
version = data.get('FunctionVersion') or current_alias.get('FunctionVersion')
description = data.get('Description') or current_alias.get('Description')
return jsonify(do_update_alias(arn, name, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['GET'])
def get_alias(function, name):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
if name not in arn_to_lambda.get(arn).aliases:
return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
return jsonify(arn_to_lambda.get(arn).aliases.get(name))
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['GET'])
def list_aliases(function):
arn = func_arn(function)
if arn not in arn_to_lambda:
return not_found_error(arn)
return jsonify({'Aliases': sorted(arn_to_lambda.get(arn).aliases.values(),
key=lambda x: x['Name'])})
@app.route('/<version>/functions/<function>/concurrency', methods=['PUT'])
def put_concurrency(version, function):
# the version for put_concurrency != PATH_ROOT, at the time of this
# writing it's: /2017-10-31 for this endpoint
# https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
arn = func_arn(function)
data = json.loads(request.data)
lambda_details = arn_to_lambda.get(arn)
if not lambda_details:
return not_found_error(arn)
lambda_details.concurrency = data
return jsonify(data)
@app.route('/<version>/tags/<arn>', methods=['GET'])
def list_tags(version, arn):
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
result = {'Tags': func_details.tags}
return jsonify(result)
@app.route('/<version>/tags/<arn>', methods=['POST'])
def tag_resource(version, arn):
data = json.loads(request.data)
tags = data.get('Tags', {})
if tags:
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
if func_details:
func_details.tags.update(tags)
return jsonify({})
@app.route('/<version>/tags/<arn>', methods=['DELETE'])
def untag_resource(version, arn):
tag_keys = request.args.getlist('tagKeys')
func_details = arn_to_lambda.get(arn)
if not func_details:
return not_found_error(arn)
for tag_key in tag_keys:
func_details.tags.pop(tag_key, None)
return jsonify({})
def serve(port, quiet=True):
# initialize the Lambda executor
LAMBDA_EXECUTOR.startup()
generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
b2sdk/raw_api.py
|
######################################################################
#
# File: b2sdk/raw_api.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import print_function
import base64
import os
import random
import re
import sys
import time
import traceback
from abc import ABCMeta, abstractmethod
import six
from .b2http import B2Http
from .exception import UnusableFileName
from .utils import b2_url_encode, hex_sha1_of_stream
# All possible capabilities
ALL_CAPABILITIES = [
'listKeys',
'writeKeys',
'deleteKeys',
'listBuckets',
'writeBuckets',
'deleteBuckets',
'listFiles',
'readFiles',
'shareFiles',
'writeFiles',
'deleteFiles',
]
# Standard names for file info entries
SRC_LAST_MODIFIED_MILLIS = 'src_last_modified_millis'
# Special X-Bz-Content-Sha1 value to verify checksum at the end
HEX_DIGITS_AT_END = 'hex_digits_at_end'
# API version number to use when calling the service
API_VERSION = 'v2'
@six.add_metaclass(ABCMeta)
class AbstractRawApi(object):
"""
Direct access to the B2 web apis.
"""
@abstractmethod
def cancel_large_file(self, api_url, account_auth_token, file_id):
pass
@abstractmethod
def delete_bucket(self, api_url, account_auth_token, account_id, bucket_id):
pass
@abstractmethod
def delete_file_version(self, api_url, account_auth_token, file_id, file_name):
pass
@abstractmethod
def finish_large_file(self, api_url, account_auth_token, file_id, part_sha1_array):
pass
@abstractmethod
def get_upload_part_url(self, api_url, account_auth_token, file_id):
pass
@abstractmethod
def hide_file(self, api_url, account_auth_token, bucket_id, file_name):
pass
@abstractmethod
def list_parts(self, api_url, account_auth_token, file_id, start_part_number, max_part_count):
pass
@abstractmethod
def list_unfinished_large_files(
self, api_url, account_auth_token, bucket_id, start_file_id=None, max_file_count=None
):
pass
@abstractmethod
def start_large_file(
self, api_url, account_auth_token, bucket_id, file_name, content_type, file_info
):
pass
@abstractmethod
def update_bucket(
self,
api_url,
account_auth_token,
account_id,
bucket_id,
bucket_type=None,
bucket_info=None,
cors_rules=None,
lifecycle_rules=None,
if_revision_is=None
):
pass
@abstractmethod
def upload_part(
self, upload_url, upload_auth_token, part_number, content_length, sha1_sum, input_stream
):
pass
def get_download_url_by_id(self, download_url, account_auth_token, file_id):
return '%s/b2api/%s/b2_download_file_by_id?fileId=%s' % (download_url, API_VERSION, file_id)
def get_download_url_by_name(self, download_url, account_auth_token, bucket_name, file_name):
return download_url + '/file/' + bucket_name + '/' + b2_url_encode(file_name)
class B2RawApi(AbstractRawApi):
"""
Provides access to the B2 web APIs, exactly as they are provided by b2.
Requires that you provide all necessary URLs and auth tokens for each call.
Each API call decodes the returned JSON and returns a dict.
For details on what each method does, see the B2 docs:
https://www.backblaze.com/b2/docs/
This class is intended to be a super-simple, very thin layer on top
of the HTTP calls. It can be mocked-out for testing higher layers.
And this class can be tested by exercising each call just once,
which is relatively quick.
All public methods of this class except authorize_account shall accept
api_url and account_info as first two positional arguments. This is needed
for B2Session magic.
"""
def __init__(self, b2_http):
self.b2_http = b2_http
def _post_json(self, base_url, api_name, auth, **params):
"""
Helper method for calling an API with the given auth and params.
:param base_url: Something like "https://api001.backblazeb2.com/"
:param auth: Passed in Authorization header.
:param api_name: Example: "b2_create_bucket"
:param args: The rest of the parameters are passed to b2.
:return:
"""
url = '%s/b2api/%s/%s' % (base_url, API_VERSION, api_name)
headers = {'Authorization': auth}
return self.b2_http.post_json_return_json(url, headers, params)
def authorize_account(self, realm_url, application_key_id, application_key):
auth = b'Basic ' + base64.b64encode(six.b('%s:%s' % (application_key_id, application_key)))
return self._post_json(realm_url, 'b2_authorize_account', auth)
def cancel_large_file(self, api_url, account_auth_token, file_id):
return self._post_json(api_url, 'b2_cancel_large_file', account_auth_token, fileId=file_id)
def create_bucket(
self,
api_url,
account_auth_token,
account_id,
bucket_name,
bucket_type,
bucket_info=None,
cors_rules=None,
lifecycle_rules=None
):
return self._post_json(
api_url,
'b2_create_bucket',
account_auth_token,
accountId=account_id,
bucketName=bucket_name,
bucketType=bucket_type,
bucketInfo=bucket_info,
corsRules=cors_rules,
lifecycleRules=lifecycle_rules
)
def create_key(
self, api_url, account_auth_token, account_id, capabilities, key_name,
valid_duration_seconds, bucket_id, name_prefix
):
return self._post_json(
api_url,
'b2_create_key',
account_auth_token,
accountId=account_id,
capabilities=capabilities,
keyName=key_name,
validDurationInSeconds=valid_duration_seconds,
bucketId=bucket_id,
namePrefix=name_prefix,
)
def delete_bucket(self, api_url, account_auth_token, account_id, bucket_id):
return self._post_json(
api_url,
'b2_delete_bucket',
account_auth_token,
accountId=account_id,
bucketId=bucket_id
)
def delete_file_version(self, api_url, account_auth_token, file_id, file_name):
return self._post_json(
api_url,
'b2_delete_file_version',
account_auth_token,
fileId=file_id,
fileName=file_name
)
def delete_key(self, api_url, account_auth_token, application_key_id):
return self._post_json(
api_url,
'b2_delete_key',
account_auth_token,
applicationKeyId=application_key_id,
)
def download_file_from_url(self, _, account_auth_token_or_none, url, range_=None):
"""
Issues a streaming request for download of a file, potentially authorized.
:param _: unused (caused by B2Session magic)
:param account_auth_token_or_none: an optional account auth token to pass in
:param url: The full URL to download from
:param range_: two-element tuple for the http Range header
:return: b2_http response
"""
request_headers = {}
_add_range_header(request_headers, range_)
if account_auth_token_or_none is not None:
request_headers['Authorization'] = account_auth_token_or_none
return self.b2_http.get_content(url, request_headers)
def finish_large_file(self, api_url, account_auth_token, file_id, part_sha1_array):
return self._post_json(
api_url,
'b2_finish_large_file',
account_auth_token,
fileId=file_id,
partSha1Array=part_sha1_array
)
def get_download_authorization(
self, api_url, account_auth_token, bucket_id, file_name_prefix, valid_duration_in_seconds
):
return self._post_json(
api_url,
'b2_get_download_authorization',
account_auth_token,
bucketId=bucket_id,
fileNamePrefix=file_name_prefix,
validDurationInSeconds=valid_duration_in_seconds
)
def get_file_info(self, api_url, account_auth_token, file_id):
return self._post_json(api_url, 'b2_get_file_info', account_auth_token, fileId=file_id)
def get_upload_url(self, api_url, account_auth_token, bucket_id):
return self._post_json(api_url, 'b2_get_upload_url', account_auth_token, bucketId=bucket_id)
def get_upload_part_url(self, api_url, account_auth_token, file_id):
return self._post_json(
api_url, 'b2_get_upload_part_url', account_auth_token, fileId=file_id
)
def hide_file(self, api_url, account_auth_token, bucket_id, file_name):
return self._post_json(
api_url, 'b2_hide_file', account_auth_token, bucketId=bucket_id, fileName=file_name
)
def list_buckets(
self,
api_url,
account_auth_token,
account_id,
bucket_id=None,
bucket_name=None,
):
return self._post_json(
api_url,
'b2_list_buckets',
account_auth_token,
accountId=account_id,
bucketTypes=['all'],
bucketId=bucket_id,
bucketName=bucket_name,
)
def list_file_names(
self, api_url, account_auth_token, bucket_id, start_file_name=None, max_file_count=None
):
return self._post_json(
api_url,
'b2_list_file_names',
account_auth_token,
bucketId=bucket_id,
startFileName=start_file_name,
maxFileCount=max_file_count
)
def list_file_versions(
self,
api_url,
account_auth_token,
bucket_id,
start_file_name=None,
start_file_id=None,
max_file_count=None
):
return self._post_json(
api_url,
'b2_list_file_versions',
account_auth_token,
bucketId=bucket_id,
startFileName=start_file_name,
startFileId=start_file_id,
maxFileCount=max_file_count
)
def list_keys(
self,
api_url,
account_auth_token,
account_id,
max_key_count=None,
start_application_key_id=None
):
return self._post_json(
api_url,
'b2_list_keys',
account_auth_token,
accountId=account_id,
maxKeyCount=max_key_count,
startApplicationKeyId=start_application_key_id,
)
def list_parts(self, api_url, account_auth_token, file_id, start_part_number, max_part_count):
return self._post_json(
api_url,
'b2_list_parts',
account_auth_token,
fileId=file_id,
startPartNumber=start_part_number,
maxPartCount=max_part_count
)
def list_unfinished_large_files(
self, api_url, account_auth_token, bucket_id, start_file_id=None, max_file_count=None
):
return self._post_json(
api_url,
'b2_list_unfinished_large_files',
account_auth_token,
bucketId=bucket_id,
startFileId=start_file_id,
maxFileCount=max_file_count
)
def start_large_file(
self, api_url, account_auth_token, bucket_id, file_name, content_type, file_info
):
return self._post_json(
api_url,
'b2_start_large_file',
account_auth_token,
bucketId=bucket_id,
fileName=file_name,
fileInfo=file_info,
contentType=content_type
)
def update_bucket(
self,
api_url,
account_auth_token,
account_id,
bucket_id,
bucket_type=None,
bucket_info=None,
cors_rules=None,
lifecycle_rules=None,
if_revision_is=None
):
assert bucket_info or bucket_type
kwargs = {}
if if_revision_is is not None:
kwargs['ifRevisionIs'] = if_revision_is
if bucket_info is not None:
kwargs['bucketInfo'] = bucket_info
if bucket_type is not None:
kwargs['bucketType'] = bucket_type
if cors_rules is not None:
kwargs['corsRules'] = cors_rules
if lifecycle_rules is not None:
kwargs['lifecycleRules'] = lifecycle_rules
return self._post_json(
api_url,
'b2_update_bucket',
account_auth_token,
accountId=account_id,
bucketId=bucket_id,
**kwargs
)
def unprintable_to_hex(self, string):
"""
Replace unprintable chars in string with a hex representation.
:param string: An arbitrary string, possibly with unprintable characters.
:return: The string, with unprintable characters changed to hex (e.g., "\x07")
"""
unprintables_pattern = re.compile(r'[\x00-\x1f]')
def hexify(match):
return r'\x{0:02x}'.format(ord(match.group()))
return unprintables_pattern.sub(hexify, string)
def check_b2_filename(self, filename):
"""
Raise an appropriate exception with details if the filename is unusable.
See https://www.backblaze.com/b2/docs/files.html for the rules.
:param filename: A proposed filename in unicode.
:return: None if the filename is usable.
"""
encoded_name = filename.encode('utf-8')
length_in_bytes = len(encoded_name)
if length_in_bytes < 1:
raise UnusableFileName("Filename must be at least 1 character.")
if length_in_bytes > 1024:
raise UnusableFileName("Filename is too long (can be at most 1024 bytes).")
lowest_unicode_value = ord(min(filename))
if lowest_unicode_value < 32:
message = u"Filename \"{0}\" contains code {1} (hex {2:02x}), less than 32.".format(
self.unprintable_to_hex(filename), lowest_unicode_value, lowest_unicode_value
)
raise UnusableFileName(message)
# No DEL for you.
if '\x7f' in filename:
raise UnusableFileName("DEL character (0x7f) not allowed.")
if filename[0] == '/' or filename[-1] == '/':
raise UnusableFileName("Filename may not start or end with '/'.")
if '//' in filename:
raise UnusableFileName("Filename may not contain \"//\".")
long_segment = max([len(segment.encode('utf-8')) for segment in filename.split('/')])
if long_segment > 250:
raise UnusableFileName("Filename segment too long (maximum 250 bytes in utf-8).")
def upload_file(
self, upload_url, upload_auth_token, file_name, content_length, content_type, content_sha1,
file_infos, data_stream
):
"""
Uploads one small file to b2.
        :param upload_url: The upload URL returned by b2_get_upload_url
        :param upload_auth_token: The auth token returned by b2_get_upload_url
:param file_name: The name of the B2 file
:param content_length: Number of bytes in the file.
:param content_type: MIME type.
:param content_sha1: Hex SHA1 of the contents of the file
:param file_infos: Extra file info to upload
:param data_stream: A file like object from which the contents of the file can be read.
        :return: The parsed JSON response from the server.
"""
# Raise UnusableFileName if the file_name doesn't meet the rules.
self.check_b2_filename(file_name)
headers = {
'Authorization': upload_auth_token,
'Content-Length': str(content_length),
'X-Bz-File-Name': b2_url_encode(file_name),
'Content-Type': content_type,
'X-Bz-Content-Sha1': content_sha1
}
for k, v in six.iteritems(file_infos):
headers['X-Bz-Info-' + k] = b2_url_encode(v)
return self.b2_http.post_content_return_json(upload_url, headers, data_stream)
def upload_part(
self, upload_url, upload_auth_token, part_number, content_length, content_sha1, data_stream
):
headers = {
'Authorization': upload_auth_token,
'Content-Length': str(content_length),
'X-Bz-Part-Number': str(part_number),
'X-Bz-Content-Sha1': content_sha1
}
return self.b2_http.post_content_return_json(upload_url, headers, data_stream)
def test_raw_api():
"""
Exercises the code in B2RawApi by making each call once, just
to make sure the parameters are passed in, and the result is
passed back.
The goal is to be a complete test of B2RawApi, so the tests for
the rest of the code can use the simulator.
Prints to stdout if things go wrong.
:return: 0 on success, non-zero on failure.
"""
try:
raw_api = B2RawApi(B2Http())
test_raw_api_helper(raw_api)
return 0
except Exception:
traceback.print_exc(file=sys.stdout)
return 1
def test_raw_api_helper(raw_api):
"""
    Tries each of the calls to the raw API. Raises an
    exception if anything goes wrong.
This uses a Backblaze account that is just for this test.
The account uses the free level of service, which should
be enough to run this test a reasonable number of times
each day. If somebody abuses the account for other things,
this test will break and we'll have to do something about
it.
"""
application_key_id = os.environ.get('TEST_APPLICATION_KEY_ID')
if application_key_id is None:
print('TEST_APPLICATION_KEY_ID is not set.', file=sys.stderr)
sys.exit(1)
application_key = os.environ.get('TEST_APPLICATION_KEY')
if application_key is None:
print('TEST_APPLICATION_KEY is not set.', file=sys.stderr)
sys.exit(1)
realm_url = 'https://api.backblazeb2.com'
# b2_authorize_account
print('b2_authorize_account')
auth_dict = raw_api.authorize_account(realm_url, application_key_id, application_key)
account_id = auth_dict['accountId']
account_auth_token = auth_dict['authorizationToken']
api_url = auth_dict['apiUrl']
download_url = auth_dict['downloadUrl']
# b2_create_key
print('b2_create_key')
key_dict = raw_api.create_key(
api_url,
account_auth_token,
account_id,
['readFiles'],
'testKey',
None,
None,
None,
)
# b2_list_keys
print('b2_list_keys')
raw_api.list_keys(api_url, account_auth_token, account_id, 10)
# b2_delete_key
print('b2_delete_key')
raw_api.delete_key(api_url, account_auth_token, key_dict['applicationKeyId'])
# b2_create_bucket, with a unique bucket name
# Include the account ID in the bucket name to be
# sure it doesn't collide with bucket names from
# other accounts.
print('b2_create_bucket')
bucket_name = 'test-raw-api-%s-%d-%d' % (
account_id, int(time.time()), random.randint(1000, 9999)
)
bucket_dict = raw_api.create_bucket(
api_url, account_auth_token, account_id, bucket_name, 'allPublic'
)
bucket_id = bucket_dict['bucketId']
first_bucket_revision = bucket_dict['revision']
# b2_list_buckets
print('b2_list_buckets')
bucket_list_dict = raw_api.list_buckets(api_url, account_auth_token, account_id)
# b2_get_upload_url
print('b2_get_upload_url')
upload_url_dict = raw_api.get_upload_url(api_url, account_auth_token, bucket_id)
upload_url = upload_url_dict['uploadUrl']
upload_auth_token = upload_url_dict['authorizationToken']
# b2_upload_file
print('b2_upload_file')
file_name = 'test.txt'
file_contents = six.b('hello world')
file_sha1 = hex_sha1_of_stream(six.BytesIO(file_contents), len(file_contents))
file_dict = raw_api.upload_file(
upload_url,
upload_auth_token,
file_name,
len(file_contents),
'text/plain',
file_sha1,
{'color': 'blue'},
six.BytesIO(file_contents),
)
file_id = file_dict['fileId']
# b2_download_file_by_id with auth
print('b2_download_file_by_id (auth)')
url = raw_api.get_download_url_by_id(download_url, None, file_id)
with raw_api.download_file_from_url(None, account_auth_token, url) as response:
data = next(response.iter_content(chunk_size=len(file_contents)))
assert data == file_contents, data
# b2_download_file_by_id no auth
print('b2_download_file_by_id (no auth)')
url = raw_api.get_download_url_by_id(download_url, None, file_id)
with raw_api.download_file_from_url(None, None, url) as response:
data = next(response.iter_content(chunk_size=len(file_contents)))
assert data == file_contents, data
# b2_download_file_by_name with auth
print('b2_download_file_by_name (auth)')
url = raw_api.get_download_url_by_name(download_url, None, bucket_name, file_name)
with raw_api.download_file_from_url(None, account_auth_token, url) as response:
data = next(response.iter_content(chunk_size=len(file_contents)))
assert data == file_contents, data
# b2_download_file_by_name no auth
print('b2_download_file_by_name (no auth)')
url = raw_api.get_download_url_by_name(download_url, None, bucket_name, file_name)
with raw_api.download_file_from_url(None, None, url) as response:
data = next(response.iter_content(chunk_size=len(file_contents)))
assert data == file_contents, data
# b2_get_download_authorization
print('b2_get_download_authorization')
download_auth = raw_api.get_download_authorization(
api_url, account_auth_token, bucket_id, file_name[:-2], 12345
)
download_auth_token = download_auth['authorizationToken']
# b2_download_file_by_name with download auth
print('b2_download_file_by_name (download auth)')
url = raw_api.get_download_url_by_name(download_url, None, bucket_name, file_name)
with raw_api.download_file_from_url(None, download_auth_token, url) as response:
data = next(response.iter_content(chunk_size=len(file_contents)))
assert data == file_contents, data
# b2_list_file_names
print('b2_list_file_names')
list_names_dict = raw_api.list_file_names(api_url, account_auth_token, bucket_id)
assert [file_name] == [f_dict['fileName'] for f_dict in list_names_dict['files']]
# b2_list_file_names (start, count)
print('b2_list_file_names (start, count)')
list_names_dict = raw_api.list_file_names(
api_url, account_auth_token, bucket_id, start_file_name=file_name, max_file_count=5
)
assert [file_name] == [f_dict['fileName'] for f_dict in list_names_dict['files']]
# b2_hide_file
print('b2_hide_file')
raw_api.hide_file(api_url, account_auth_token, bucket_id, file_name)
# b2_get_file_info
print('b2_get_file_info')
file_info_dict = raw_api.get_file_info(api_url, account_auth_token, file_id)
assert file_info_dict['fileName'] == file_name
# b2_start_large_file
print('b2_start_large_file')
file_info = {'color': 'red'}
large_info = raw_api.start_large_file(
api_url, account_auth_token, bucket_id, file_name, 'text/plain', file_info
)
large_file_id = large_info['fileId']
# b2_get_upload_part_url
print('b2_get_upload_part_url')
upload_part_dict = raw_api.get_upload_part_url(api_url, account_auth_token, large_file_id)
upload_part_url = upload_part_dict['uploadUrl']
    upload_part_auth = upload_part_dict['authorizationToken']
# b2_upload_part
print('b2_upload_part')
part_contents = six.b('hello part')
part_sha1 = hex_sha1_of_stream(six.BytesIO(part_contents), len(part_contents))
raw_api.upload_part(
        upload_part_url, upload_part_auth, 1, len(part_contents), part_sha1,
six.BytesIO(part_contents)
)
# b2_list_parts
print('b2_list_parts')
parts_response = raw_api.list_parts(api_url, account_auth_token, large_file_id, 1, 100)
assert [1] == [part['partNumber'] for part in parts_response['parts']]
# b2_list_unfinished_large_files
unfinished_list = raw_api.list_unfinished_large_files(api_url, account_auth_token, bucket_id)
assert [file_name] == [f_dict['fileName'] for f_dict in unfinished_list['files']]
assert file_info == unfinished_list['files'][0]['fileInfo']
# b2_finish_large_file
# We don't upload enough data to actually finish on, so we'll just
# check that the right error is returned.
print('b2_finish_large_file')
try:
raw_api.finish_large_file(api_url, account_auth_token, large_file_id, [part_sha1])
raise Exception('finish should have failed')
except Exception as e:
assert 'large files must have at least 2 parts' in str(e)
# b2_update_bucket
print('b2_update_bucket')
updated_bucket = raw_api.update_bucket(
api_url,
account_auth_token,
account_id,
bucket_id,
'allPrivate',
bucket_info={'color': 'blue'}
)
assert first_bucket_revision < updated_bucket['revision']
# clean up this test
_clean_and_delete_bucket(raw_api, api_url, account_auth_token, account_id, bucket_id)
# Clean up from old tests. Empty and delete any buckets more than an hour old.
for bucket_dict in bucket_list_dict['buckets']:
bucket_id = bucket_dict['bucketId']
bucket_name = bucket_dict['bucketName']
if _should_delete_bucket(bucket_name):
print('cleaning up old bucket: ' + bucket_name)
_clean_and_delete_bucket(raw_api, api_url, account_auth_token, account_id, bucket_id)
def _clean_and_delete_bucket(raw_api, api_url, account_auth_token, account_id, bucket_id):
# Delete the files. This test never creates more than a few files,
# so one call to list_file_versions should get them all.
versions_dict = raw_api.list_file_versions(api_url, account_auth_token, bucket_id)
for version_dict in versions_dict['files']:
file_id = version_dict['fileId']
file_name = version_dict['fileName']
action = version_dict['action']
if action in ['hide', 'upload']:
print('b2_delete_file', file_name, action)
raw_api.delete_file_version(api_url, account_auth_token, file_id, file_name)
else:
print('b2_cancel_large_file', file_name)
raw_api.cancel_large_file(api_url, account_auth_token, file_id)
# Delete the bucket
print('b2_delete_bucket', bucket_id)
raw_api.delete_bucket(api_url, account_auth_token, account_id, bucket_id)
def _should_delete_bucket(bucket_name):
    # Bucket names for this test look like: test-raw-api-c7b22d0b0ad7-1460060364-5670
# Other buckets should not be deleted.
match = re.match(r'^test-raw-api-[a-f0-9]+-([0-9]+)-([0-9]+)', bucket_name)
if match is None:
return False
# Is it more than an hour old?
bucket_time = int(match.group(1))
now = time.time()
return bucket_time + 3600 <= now
def _add_range_header(headers, range_):
if range_ is not None:
assert len(range_) == 2, range_
assert (range_[0] + 0) <= (range_[1] + 0), range_ # not strings
assert range_[0] >= 0, range_
headers['Range'] = "bytes=%d-%d" % range_
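# e.g. _add_range_header(headers, (0, 99)) sets headers['Range'] to 'bytes=0-99'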
if __name__ == '__main__':
test_raw_api()
|
[] |
[] |
[
"TEST_APPLICATION_KEY",
"TEST_APPLICATION_KEY_ID"
] |
[]
|
["TEST_APPLICATION_KEY", "TEST_APPLICATION_KEY_ID"]
|
python
| 2 | 0 | |
sdk/client.go
|
package sdk
import (
"crypto/tls"
"net/http"
"net/url"
"os"
"github.com/ory-am/hydra/client"
"github.com/ory-am/hydra/jwk"
hoauth2 "github.com/ory-am/hydra/oauth2"
"github.com/ory-am/hydra/pkg"
"github.com/ory-am/hydra/policy"
"github.com/ory-am/hydra/warden"
"github.com/ory-am/hydra/warden/group"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
type option func(*Client) error
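// Options follow the functional-options pattern. For illustration only (the
// real constructors are defined elsewhere in this package), an option such as
// ClientID looks roughly like:
//
//	func ClientID(id string) option {
//		return func(c *Client) error {
//			c.clientID = id
//			return nil
//		}
//	}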
// defaultOptions are the default options for the Hydra client, read from the environment.
var defaultOptions = []option{
ClusterURL(os.Getenv("HYDRA_CLUSTER_URL")),
ClientID(os.Getenv("HYDRA_CLIENT_ID")),
ClientSecret(os.Getenv("HYDRA_CLIENT_SECRET")),
Scopes("hydra"),
}
// Client bundles all of Hydra's HTTP-based API clients for convenient use.
type Client struct {
// Clients offers OAuth2 Client management capabilities.
Clients *client.HTTPManager
// JSONWebKeys offers JSON Web Key management capabilities.
JSONWebKeys *jwk.HTTPManager
// Policies offers Access Policy management capabilities.
Policies *policy.HTTPManager
// Warden offers Access Token and Access Request validation strategies (for first-party resource servers).
Warden *warden.HTTPWarden
// Introspection offers Access Token and Access Request introspection strategies (according to RFC 7662).
Introspection *hoauth2.HTTPIntrospector
// Revocation offers OAuth2 Token Revocation.
Revocator *hoauth2.HTTPRecovator
// Groups offers warden group management capabilities.
Groups *group.HTTPManager
http *http.Client
clusterURL *url.URL
clientID string
clientSecret string
skipTLSVerify bool
scopes []string
credentials clientcredentials.Config
}
// Connect instantiates a new client to communicate with Hydra.
//
// import "github.com/ory-am/hydra/sdk"
//
// var hydra, err = sdk.Connect(
// sdk.ClientID("client-id"),
// sdk.ClientSecret("client-secret"),
// sdk.ClusterURL("https://localhost:4444"),
// )
func Connect(opts ...option) (*Client, error) {
c := &Client{}
var err error
// apply default options
for _, opt := range defaultOptions {
err = opt(c)
if err != nil {
return nil, err
}
}
// override any default values with given options
for _, opt := range opts {
err = opt(c)
if err != nil {
return nil, err
}
}
c.credentials = clientcredentials.Config{
ClientID: c.clientID,
ClientSecret: c.clientSecret,
TokenURL: pkg.JoinURL(c.clusterURL, "oauth2/token").String(),
Scopes: c.scopes,
}
c.http = http.DefaultClient
if c.skipTLSVerify {
c.http = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
}
err = c.authenticate()
if err != nil {
return nil, err
}
// initialize service endpoints
c.Clients = &client.HTTPManager{
Endpoint: pkg.JoinURL(c.clusterURL, "/clients"),
Client: c.http,
}
c.Revocator = &hoauth2.HTTPRecovator{
Endpoint: pkg.JoinURL(c.clusterURL, hoauth2.RevocationPath),
Config: &c.credentials,
}
c.Introspection = &hoauth2.HTTPIntrospector{
Endpoint: pkg.JoinURL(c.clusterURL, hoauth2.IntrospectPath),
Client: c.http,
}
c.JSONWebKeys = &jwk.HTTPManager{
Endpoint: pkg.JoinURL(c.clusterURL, "/keys"),
Client: c.http,
}
c.Policies = &policy.HTTPManager{
Endpoint: pkg.JoinURL(c.clusterURL, "/policies"),
Client: c.http,
}
c.Warden = &warden.HTTPWarden{
Client: c.http,
Endpoint: c.clusterURL,
}
c.Groups = &group.HTTPManager{
Endpoint: pkg.JoinURL(c.clusterURL, "/warden/groups"),
Client: c.http,
}
return c, nil
}
// OAuth2Config returns an oauth2 config instance which you can use to initiate various oauth2 flows.
//
// config := client.OAuth2Config("https://mydomain.com/oauth2_callback", "photos", "contacts.read")
//  redirectRequestTo := config.AuthCodeURL("some-state")
//
// // in callback handler...
// token, err := config.Exchange(oauth2.NoContext, authorizeCode)
func (h *Client) OAuth2Config(redirectURL string, scopes ...string) *oauth2.Config {
return &oauth2.Config{
ClientSecret: h.clientSecret,
ClientID: h.clientID,
Endpoint: oauth2.Endpoint{
TokenURL: pkg.JoinURL(h.clusterURL, "/oauth2/token").String(),
AuthURL: pkg.JoinURL(h.clusterURL, "/oauth2/auth").String(),
},
Scopes: scopes,
RedirectURL: redirectURL,
}
}
func (h *Client) authenticate() error {
ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, h.http)
_, err := h.credentials.Token(ctx)
if err != nil {
return err
}
h.http = h.credentials.Client(ctx)
return nil
}
|
[
"\"HYDRA_CLUSTER_URL\"",
"\"HYDRA_CLIENT_ID\"",
"\"HYDRA_CLIENT_SECRET\""
] |
[] |
[
"HYDRA_CLUSTER_URL",
"HYDRA_CLIENT_SECRET",
"HYDRA_CLIENT_ID"
] |
[]
|
["HYDRA_CLUSTER_URL", "HYDRA_CLIENT_SECRET", "HYDRA_CLIENT_ID"]
|
go
| 3 | 0 | |
core/src/test/java/org/bitcoinj/utils/AppDataDirectoryTest.java
|
/*
* Copyright 2019 Michael Sean Gilligan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bitcoinj.utils;
import org.bitcoinj.core.Utils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Basic test of AppDataDirectory
*/
public class AppDataDirectoryTest {
private static final String HOMEDIR = System.getProperty("user.home");
private static final String WINAPPDATA = System.getenv("APPDATA");
@Test
public void worksOnCurrentPlatform() {
final String appName = "bitcoinj";
String path = AppDataDirectory.get(appName).toString();
if (Utils.isWindows()) {
assertEquals("Path wrong on Mac", winPath(appName), path);
} else if (Utils.isMac()) {
assertEquals("Path wrong on Mac", macPath(appName), path);
} else if (Utils.isLinux()) {
assertEquals("Path wrong on Linux", unixPath(appName), path);
} else {
assertEquals("Path wrong on unknown/default", unixPath(appName), path);
}
}
@Test
public void worksOnCurrentPlatformForBitcoinCore() {
final String appName = "Bitcoin";
String path = AppDataDirectory.get(appName).toString();
if (Utils.isWindows()) {
assertEquals("Path wrong on Mac", winPath(appName), path);
} else if (Utils.isMac()) {
assertEquals("Path wrong on Mac", macPath(appName), path);
} else if (Utils.isLinux()) {
assertEquals("Path wrong on Linux", unixPath(appName), path);
} else {
assertEquals("Path wrong on unknown/default", unixPath(appName), path);
}
}
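    // Illustrative expected paths for appName "bitcoinj":
    //   Windows:      %APPDATA%\.bitcoinj
    //   macOS:        ~/Library/Application Support/bitcoinj
    //   Linux/other:  ~/.bitcoinj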
private static String winPath(String appName) {
return WINAPPDATA + "\\." + appName.toLowerCase();
}
private static String macPath(String appName) {
return HOMEDIR + "/Library/Application Support/" + appName;
}
private static String unixPath(String appName) {
return HOMEDIR + "/." + appName.toLowerCase();
}
}
|
[
"\"APPDATA\""
] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
java
| 1 | 0 | |
cddagl/functions.py
|
import logging
import os
import re
import traceback
from io import StringIO
import winutils
from pywintypes import com_error
import cddagl
from cddagl.i18n import proxy_gettext as _
from cddagl.sql.functions import get_config_value, config_true
version = cddagl.__version__
logger = logging.getLogger('cddagl')
def log_exception(extype, value, tb):
tb_io = StringIO()
traceback.print_tb(tb, file=tb_io)
logger.critical(_('Global error:\nLauncher version: {version}\nType: '
'{extype}\nValue: {value}\nTraceback:\n{traceback}').format(
version=cddagl.__version__, extype=str(extype), value=str(value),
traceback=tb_io.getvalue()))
def ensure_slash(path):
"""Return path making sure it has a trailing slash at the end."""
return os.path.join(path, '')
def unique(seq):
"""Return unique entries in a unordered sequence while original order."""
seen = set()
for x in seq:
if x not in seen:
seen.add(x)
yield x
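# Example (illustrative): list(unique([3, 1, 3, 2, 1])) == [3, 1, 2]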
def clean_qt_path(path):
return path.replace('/', '\\')
def safe_filename(filename):
keepcharacters = (' ', '.', '_', '-')
return ''.join(c for c in filename if c.isalnum() or c in keepcharacters
).strip()
def tryint(s):
try:
return int(s)
    except (ValueError, TypeError):
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return arstrip([tryint(c) for c in re.split('([0-9]+)', s)])
def arstrip(value):
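    """Strip trailing empty-string chunks (as produced by re.split) from a list."""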
while len(value) > 1 and value[-1:] == ['']:
value = value[:-1]
return value
def is_64_windows():
return 'PROGRAMFILES(X86)' in os.environ
def bitness():
if is_64_windows():
return _('64-bit')
else:
return _('32-bit')
def sizeof_fmt(num, suffix=None):
if suffix is None:
suffix = _('B')
for unit in ['', _('Ki'), _('Mi'), _('Gi'), _('Ti'), _('Pi'), _('Ei'),
_('Zi')]:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, _('Yi'), suffix)
def delete_path(path):
''' Move directory or file in the recycle bin (or permanently delete it
depending on the settings used) using the built in Windows File
operations dialog
'''
# Make sure we have an absolute path first
if not os.path.isabs(path):
path = os.path.abspath(path)
shellcon = winutils.shellcon
permanently_delete_files = config_true(
get_config_value('permanently_delete_files', 'False'))
if permanently_delete_files:
flags = 0
else:
flags = shellcon.FOF_ALLOWUNDO
flags = (flags |
shellcon.FOF_SILENT |
shellcon.FOF_NOCONFIRMATION |
shellcon.FOF_WANTNUKEWARNING
)
try:
return winutils.delete(path, flags)
except com_error:
return False
def move_path(srcpath, dstpath):
''' Move srcpath to dstpath using using the built in Windows File
operations dialog
'''
# Make sure we have absolute paths first
if not os.path.isabs(srcpath):
srcpath = os.path.abspath(srcpath)
if not os.path.isabs(dstpath):
dstpath = os.path.abspath(dstpath)
shellcon = winutils.shellcon
flags = (
shellcon.FOF_ALLOWUNDO |
shellcon.FOF_SILENT |
shellcon.FOF_NOCONFIRMMKDIR |
shellcon.FOF_NOCONFIRMATION |
shellcon.FOF_WANTNUKEWARNING
)
try:
return winutils.move(srcpath, dstpath, flags)
except com_error:
return False
def safe_humanize(arrow_date, other=None, locale='en_us', only_distance=False, granularity='auto'):
try:
# Can we use the normal humanize method?
return arrow_date.humanize(other=other, locale=locale, only_distance=only_distance,
granularity=granularity)
except ValueError:
# On first fail, let's try with day granularity
try:
return arrow_date.humanize(other=other, locale=locale, only_distance=only_distance,
granularity='day')
except ValueError:
            # On final failure, fall back to the en_us locale, which is always available
return arrow_date.humanize(other=other, locale='en_us', only_distance=only_distance,
granularity='auto')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/wopiserver.py
|
#!/usr/bin/env python3
'''
wopiserver.py
The Web-application Open Platform Interface (WOPI) gateway for the ScienceMesh IOP
Author: Giuseppe Lo Presti (@glpatcern), CERN/IT-ST
Contributions: see README.md
'''
import sys
import os
import time
import socket
import configparser
from platform import python_version
import logging
import logging.handlers
from urllib.parse import unquote as url_unquote
import http.client
import json
try:
import flask # Flask app server
from werkzeug.exceptions import NotFound as Flask_NotFound
from werkzeug.exceptions import MethodNotAllowed as Flask_MethodNotAllowed
import jwt # JSON Web Tokens support
from prometheus_flask_exporter import PrometheusMetrics # Prometheus support
except ImportError:
print("Missing modules, please install dependencies with `pip3 install -f requirements.txt`")
raise
import core.wopi
import core.discovery
import core.ioplocks
import core.wopiutils as utils
import bridge
# the following constant is replaced on the fly when generating the docker image
WOPISERVERVERSION = 'git'
# convenience constant for returning 401
UNAUTHORIZED = 'Client not authorized', http.client.UNAUTHORIZED
# alias of the storage layer module, see function below
storage = None
def storage_layer_import(storagetype):
'''A convenience function to import the storage layer module specified in the config and make it globally available'''
global storage # pylint: disable=global-statement
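    # 'local', 'xroot' and 'cs3' resolve to the core.localiface, core.xrootiface
    # and core.cs3iface modules respectively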
if storagetype in ['local', 'xroot', 'cs3']:
storagetype += 'iface'
else:
raise ImportError('Unsupported/Unknown storage type %s' % storagetype)
try:
storage = __import__('core.' + storagetype, globals(), locals(), [storagetype])
except ImportError:
print("Missing module when attempting to import %s.py. Please make sure dependencies are met." % storagetype)
raise
class Wopi:
'''A singleton container for all state information of the WOPI server'''
app = flask.Flask("wopiserver")
metrics = PrometheusMetrics(app, group_by='endpoint')
port = 0
lastConfigReadTime = time.time()
loglevels = {"Critical": logging.CRITICAL, # 50
"Error": logging.ERROR, # 40
"Warning": logging.WARNING, # 30
"Info": logging.INFO, # 20
"Debug": logging.DEBUG # 10
}
log = utils.JsonLogger(app.logger)
openfiles = {}
@classmethod
def init(cls):
'''Initialises the application, bails out in case of failures. Note this is not a __init__ method'''
try:
# detect hostname, or take it from the environment if set e.g. by docker
hostname = os.environ.get('HOST_HOSTNAME')
if not hostname:
hostname = socket.gethostname()
# configure the logging
loghandler = logging.FileHandler('/var/log/wopi/wopiserver.log')
loghandler.setFormatter(logging.Formatter(
fmt='{"time": "%(asctime)s.%(msecs)03d", "host": "' + hostname + \
'", "level": "%(levelname)s", "process": "%(name)s", %(message)s}',
datefmt='%Y-%m-%dT%H:%M:%S'))
cls.app.logger.handlers = [loghandler]
# read the configuration
cls.config = configparser.ConfigParser()
with open('/etc/wopi/wopiserver.defaults.conf') as fdef:
cls.config.read_file(fdef)
cls.config.read('/etc/wopi/wopiserver.conf')
# load the requested storage layer
storage_layer_import(cls.config.get('general', 'storagetype'))
# prepare the Flask web app
cls.port = int(cls.config.get('general', 'port'))
cls.log.setLevel(cls.loglevels[cls.config.get('general', 'loglevel')])
try:
cls.nonofficetypes = cls.config.get('general', 'nonofficetypes').split()
except (TypeError, configparser.NoOptionError) as e:
cls.nonofficetypes = []
with open(cls.config.get('security', 'wopisecretfile')) as s:
cls.wopisecret = s.read().strip('\n')
with open(cls.config.get('security', 'iopsecretfile')) as s:
cls.iopsecret = s.read().strip('\n')
cls.tokenvalidity = cls.config.getint('general', 'tokenvalidity')
core.wopi.enablerename = cls.config.get('general', 'enablerename', fallback='False').upper() in ('TRUE', 'YES')
storage.init(cls.config, cls.log) # initialize the storage layer
cls.useHttps = cls.config.get('security', 'usehttps').lower() == 'yes'
# validate the certificates exist if running in https mode
if cls.useHttps:
try:
with open(cls.config.get('security', 'wopicert')) as _:
pass
with open(cls.config.get('security', 'wopikey')) as _:
pass
except OSError as e:
cls.log.error('msg="Failed to open the provided certificate or key to start in https mode"')
raise
cls.wopiurl = cls.config.get('general', 'wopiurl')
if cls.config.has_option('general', 'lockpath'):
cls.lockpath = cls.config.get('general', 'lockpath')
else:
cls.lockpath = ''
_ = cls.config.get('general', 'downloadurl') # make sure this is defined
# WOPI proxy configuration (optional)
cls.wopiproxy = cls.config.get('general', 'wopiproxy', fallback='')
cls.wopiproxykey = cls.config.get('general', 'wopiproxykey', fallback='x')
cls.proxiedappname = cls.config.get('general', 'proxiedappname', fallback='')
# initialize the bridge
bridge.WB.init(cls.config, cls.log, cls.wopisecret)
# initialize the submodules
# TODO improve handling of globals across the whole code base
utils.srv = core.ioplocks.srv = core.wopi.srv = cls
utils.log = core.ioplocks.log = core.wopi.log = core.discovery.log = cls.log
utils.st = core.ioplocks.st = core.wopi.st = storage
core.discovery.config = cls.config
utils.endpoints = core.discovery.endpoints
except (configparser.NoOptionError, OSError) as e:
# any error we get here with the configuration is fatal
cls.log.fatal('msg="Failed to initialize the service, aborting" error="%s"' % e)
print("Failed to initialize the service: %s\n" % e, file=sys.stderr)
sys.exit(22)
@classmethod
def refreshconfig(cls):
'''Re-read the configuration file every 300 secs to catch any runtime parameter change'''
if time.time() > cls.lastConfigReadTime + 300:
cls.lastConfigReadTime = time.time()
cls.config.read('/etc/wopi/wopiserver.conf')
# refresh some general parameters
cls.tokenvalidity = cls.config.getint('general', 'tokenvalidity')
cls.log.setLevel(cls.loglevels[cls.config.get('general', 'loglevel')])
@classmethod
def run(cls):
'''Runs the Flask app in either standalone (https) or embedded (http) mode'''
cls.app.debug = cls.config.get('general', 'loglevel') == 'Debug'
cls.app.threaded = True
if cls.useHttps:
cls.app.ssl_context = (cls.config.get('security', 'wopicert'), cls.config.get('security', 'wopikey'))
cls.log.info('msg="WOPI Server starting in standalone secure mode" port="%d" wopiurl="%s" version="%s"' %
(cls.port, cls.wopiurl, WOPISERVERVERSION))
else:
cls.app.ssl_context = None
cls.log.info('msg="WOPI Server starting in unsecure/embedded mode" port="%d" wopiurl="%s" version="%s"' %
(cls.port, cls.wopiurl, WOPISERVERVERSION))
if cls.config.get('general', 'internalserver', fallback='flask') == 'waitress':
try:
from waitress import serve
except ImportError:
cls.log.fatal('msg="Failed to initialize the service, aborting" error="missing module waitress"')
print("Missing module waitress, aborting")
raise
serve(cls.app, host='0.0.0.0', port=cls.port)
else:
cls.app.run(host='0.0.0.0', port=cls.port, ssl_context=cls.app.ssl_context)
@Wopi.app.errorhandler(Exception)
def handleException(ex):
'''Generic method to log any uncaught exception'''
if isinstance(ex, (Flask_NotFound, Flask_MethodNotAllowed)):
return ex
return utils.logGeneralExceptionAndReturn(ex, flask.request)
@Wopi.app.route("/", methods=['GET'])
def redir():
'''A simple redirect to the page below'''
return flask.redirect("/wopi")
@Wopi.app.route("/wopi", methods=['GET'])
def index():
'''Return a default index page with some user-friendly information about this service'''
Wopi.log.debug('msg="Accessed index page" client="%s"' % flask.request.remote_addr)
resp = flask.Response("""
<html><head><title>ScienceMesh WOPI Server</title></head>
<body>
<div align="center" style="color:#000080; padding-top:50px; font-family:Verdana; size:11">
This is the ScienceMesh IOP <a href=http://wopi.readthedocs.io>WOPI</a> server to support online office-like editors.<br>
The service includes support for non-WOPI-native apps through a bridge extension.<br>
To use this service, please log in to your EFSS Storage and click on a supported document.</div>
<div style="position: absolute; bottom: 10px; left: 10px; width: 99%%;"><hr>
<i>ScienceMesh WOPI Server %s at %s. Powered by Flask %s for Python %s</i>.
</body>
</html>
""" % (WOPISERVERVERSION, socket.getfqdn(), flask.__version__, python_version()))
resp.headers['X-Frame-Options'] = 'sameorigin'
resp.headers['X-XSS-Protection'] = '1; mode=block'
return resp
#
# IOP endpoints
#
@Wopi.app.route("/wopi/iop/openinapp", methods=['GET'])
@Wopi.metrics.do_not_track()
@Wopi.metrics.counter('open_by_app', 'Number of /open calls by appname',
labels={ 'open_type': lambda: flask.request.args.get('appname') })
def iopOpenInApp():
'''Generates a WOPISrc target and an access token to be passed to a WOPI-compatible Office-like app
for accessing a given file for a given user.
Headers:
- Authorization: a bearer shared secret to protect this call as it provides direct access to any user's file
- TokenHeader: an x-access-token to serve as user identity towards Reva
- ApiKey (optional): a shared secret to be used with the end-user application if required
Request arguments:
- enum viewmode: how the user should access the file, according to utils.ViewMode/the CS3 app provider API
- string fileid: the Reva fileid of the file to be opened
- string endpoint (optional): the storage endpoint to be used to look up the file or the storage id, in case of
multi-instance underlying storage; defaults to 'default'
- string username (optional): user's full display name, typically shown by the app; defaults to
'Guest ' + 3 random letters to represent anonymous users
- string userid (optional): an unique identifier for the user, used internally by the app; defaults to
a random string of 10 characters to represent anonymous users
- string folderurl (optional): the URL to come back to the containing folder for this file, typically shown by the app
- string appname: the identifier of the end-user application to be served
- string appurl: the URL of the end-user application
- string appviewurl (optional): the URL of the end-user application in view mode when different (defaults to appurl)
- string appinturl (optional): the internal URL of the end-user application (applicable with containerized deployments)
Returns: a JSON response as follows:
{
"app-url" : "<URL of the target application with query parameters>",
"form-parameters" : { "access_token" : "<WOPI access token>" }
}
or a message and a 4xx/5xx HTTP code in case of errors
'''
Wopi.refreshconfig()
req = flask.request
# validate tokens
if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret:
Wopi.log.warning('msg="iopOpenInApp: unauthorized access attempt, missing authorization token" ' \
'client="%s" clientAuth="%s"' % (req.remote_addr, req.headers.get('Authorization')))
return UNAUTHORIZED
try:
usertoken = req.headers['TokenHeader']
except KeyError:
Wopi.log.warning('msg="iopOpenInApp: missing TokenHeader in request" client="%s"' % req.remote_addr)
return UNAUTHORIZED
# validate all parameters
fileid = req.args.get('fileid', '')
if not fileid:
Wopi.log.warning('msg="iopOpenInApp: fileid must be provided" client="%s"' % req.remote_addr)
return 'Missing fileid argument', http.client.BAD_REQUEST
try:
viewmode = utils.ViewMode(req.args['viewmode'])
except (KeyError, ValueError) as e:
Wopi.log.warning('msg="iopOpenInApp: invalid viewmode parameter" client="%s" viewmode="%s" error="%s"' %
(req.remote_addr, req.args.get('viewmode'), e))
return 'Missing or invalid viewmode argument', http.client.BAD_REQUEST
username = req.args.get('username', '')
# this needs to be a unique identifier: if missing (case of anonymous users), just generate a random string
wopiuser = req.args.get('userid', utils.randomString(10))
folderurl = url_unquote(req.args.get('folderurl', '%2F')) # defaults to `/`
endpoint = req.args.get('endpoint', 'default')
appname = url_unquote(req.args.get('appname', ''))
appurl = url_unquote(req.args.get('appurl', '')).strip('/')
appviewurl = url_unquote(req.args.get('appviewurl', appurl)).strip('/')
if not appname or not appurl:
Wopi.log.warning('msg="iopOpenInApp: app-related arguments must be provided" client="%s"' % req.remote_addr)
return 'Missing appname or appurl arguments', http.client.BAD_REQUEST
if bridge.issupported(appname):
# This is a bridge-supported application, get the extra info to enable it
apikey = req.headers.get('ApiKey')
appinturl = url_unquote(req.args.get('appinturl', appurl)) # defaults to the external appurl
try:
bridge.WB.loadplugin(appname, appurl, appinturl, apikey)
except ValueError:
return 'Failed to load WOPI bridge plugin for %s' % appname, http.client.INTERNAL_SERVER_ERROR
# for the WOPI context, bridge-supported app URLs look like this, though they are not used
appurl = appviewurl = Wopi.wopiurl + '/wopi/bridge/open'
try:
userid = storage.getuseridfromcreds(usertoken, wopiuser)
if userid != usertoken:
# this happens in hybrid deployments with xrootd as storage interface:
# in this case we override the wopiuser with the resolved uid:gid
wopiuser = userid
inode, acctok = utils.generateAccessToken(userid, fileid, viewmode, (username, wopiuser), folderurl, endpoint,
(appname, appurl, appviewurl))
except IOError as e:
Wopi.log.info('msg="iopOpenInApp: remote error on generating token" client="%s" user="%s" ' \
'friendlyname="%s" mode="%s" endpoint="%s" reason="%s"' %
(req.remote_addr, usertoken[-20:], username, viewmode, endpoint, e))
return 'Remote error, file not found or file is a directory', http.client.NOT_FOUND
res = {}
if bridge.issupported(appname):
try:
res['app-url'], res['form-parameters'] = bridge.appopen(utils.generateWopiSrc(inode), acctok)
except bridge.FailedOpen as foe:
return foe.msg, foe.statuscode
else:
res['app-url'] = appurl if viewmode == utils.ViewMode.READ_WRITE else appviewurl
res['app-url'] += '%sWOPISrc=%s' % ('&' if '?' in res['app-url'] else '?', \
utils.generateWopiSrc(inode, appname == Wopi.proxiedappname))
res['form-parameters'] = {'access_token' : acctok}
return flask.Response(json.dumps(res), mimetype='application/json')
@Wopi.app.route("/wopi/iop/download", methods=['GET'])
def iopDownload():
'''Returns the file's content for a given valid access token. Used as a download URL,
so that the path and possibly the x-access-token are never explicitly visible.'''
try:
acctok = jwt.decode(flask.request.args['access_token'], Wopi.wopisecret, algorithms=['HS256'])
if acctok['exp'] < time.time():
raise jwt.exceptions.ExpiredSignatureError
resp = flask.Response(storage.readfile(acctok['endpoint'], acctok['filename'], acctok['userid']), \
mimetype='application/octet-stream')
resp.headers['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(acctok['filename'])
resp.headers['X-Frame-Options'] = 'sameorigin'
resp.headers['X-XSS-Protection'] = '1; mode=block'
resp.status_code = http.client.OK
Wopi.log.info('msg="cboxDownload: direct download succeeded" filename="%s" user="%s" token="%s"' %
(acctok['filename'], acctok['userid'][-20:], flask.request.args['access_token'][-20:]))
return resp
except IOError as e:
Wopi.log.info('msg="Requested file not found" filename="%s" token="%s" error="%s"' %
(acctok['filename'], flask.request.args['access_token'][-20:], e))
return 'File not found', http.client.NOT_FOUND
except (jwt.exceptions.DecodeError, jwt.exceptions.ExpiredSignatureError) as e:
Wopi.log.warning('msg="Signature verification failed" client="%s" requestedUrl="%s" token="%s"' %
(flask.request.remote_addr, flask.request.base_url, flask.request.args['access_token']))
return 'Invalid access token', http.client.UNAUTHORIZED
except KeyError as e:
Wopi.log.warning('msg="Invalid access token or request argument" error="%s" request="%s"' % (e, flask.request.__dict__))
return 'Invalid request', http.client.UNAUTHORIZED
@Wopi.app.route("/wopi/iop/list", methods=['GET'])
def iopGetOpenFiles():
'''Returns a list of all currently open files, for operators only.
This call is protected by the same shared secret as the /wopi/iop/openinapp call.'''
req = flask.request
if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret:
Wopi.log.warning('msg="iopGetOpenFiles: unauthorized access attempt, missing authorization token" ' \
'client="%s"' % req.remote_addr)
return UNAUTHORIZED
# first convert the sets into lists, otherwise sets cannot be serialized in JSON format
jlist = {}
for f in list(Wopi.openfiles.keys()):
jlist[f] = (Wopi.openfiles[f][0], tuple(Wopi.openfiles[f][1]))
# dump the current list of opened files in JSON format
Wopi.log.info('msg="iopGetOpenFiles: returning list of open files" client="%s"' % req.remote_addr)
return flask.Response(json.dumps(jlist), mimetype='application/json')
#
# WOPI protocol implementation
#
@Wopi.app.route("/wopi/files/<fileid>", methods=['GET'])
def wopiCheckFileInfo(fileid):
'''The CheckFileInfo WOPI call'''
return core.wopi.checkFileInfo(fileid)
@Wopi.app.route("/wopi/files/<fileid>/contents", methods=['GET'])
def wopiGetFile(fileid):
'''The GetFile WOPI call'''
return core.wopi.getFile(fileid)
@Wopi.app.route("/wopi/files/<fileid>", methods=['POST'])
def wopiFilesPost(fileid):
    '''A dispatcher method for all POST operations on files'''
Wopi.refreshconfig()
try:
acctok = jwt.decode(flask.request.args['access_token'], Wopi.wopisecret, algorithms=['HS256'])
if acctok['exp'] < time.time():
raise jwt.exceptions.ExpiredSignatureError
headers = flask.request.headers
op = headers['X-WOPI-Override'] # must be one of the following strings, throws KeyError if missing
if op != 'GET_LOCK' and utils.ViewMode(acctok['viewmode']) != utils.ViewMode.READ_WRITE:
# protect this call if the WOPI client does not have privileges
return 'Attempting to perform a write operation using a read-only token', http.client.UNAUTHORIZED
if op in ('LOCK', 'REFRESH_LOCK'):
return core.wopi.setLock(fileid, headers, acctok)
if op == 'UNLOCK':
return core.wopi.unlock(fileid, headers, acctok)
if op == 'GET_LOCK':
return core.wopi.getLock(fileid, headers, acctok)
if op == 'PUT_RELATIVE':
return core.wopi.putRelative(fileid, headers, acctok)
if op == 'DELETE':
return core.wopi.deleteFile(fileid, headers, acctok)
if op == 'RENAME_FILE':
return core.wopi.renameFile(fileid, headers, acctok)
#elif op == 'PUT_USER_INFO': https://wopirest.readthedocs.io/en/latest/files/PutUserInfo.html
# Any other op is unsupported
Wopi.log.warning('msg="Unknown/unsupported operation" operation="%s"' % op)
return 'Not supported operation found in header', http.client.NOT_IMPLEMENTED
except (jwt.exceptions.DecodeError, jwt.exceptions.ExpiredSignatureError) as e:
Wopi.log.warning('msg="Signature verification failed" client="%s" requestedUrl="%s" error="%s" token="%s"' %
(flask.request.remote_addr, flask.request.base_url, e, flask.request.args['access_token']))
return 'Invalid access token', http.client.UNAUTHORIZED
except KeyError as e:
Wopi.log.warning('msg="Missing argument" client="%s" requestedUrl="%s" error="%s" token="%s"' %
(flask.request.remote_addr, flask.request.base_url, e, flask.request.args.get('access_token')))
return 'Missing argument: %s' % e, http.client.BAD_REQUEST
@Wopi.app.route("/wopi/files/<fileid>/contents", methods=['POST'])
def wopiPutFile(fileid):
'''The PutFile WOPI call'''
return core.wopi.putFile(fileid)
#
# IOP lock endpoints
#
@Wopi.app.route("/wopi/cbox/lock", methods=['GET', 'POST'])
@Wopi.metrics.counter('lock_by_ext', 'Number of /lock calls by file extension',
labels={'open_type': lambda:
(flask.request.args['filename'].split('.')[-1] \
if 'filename' in flask.request.args and '.' in flask.request.args['filename'] \
else 'noext') if flask.request.method == 'POST' else 'query'
})
def cboxLock():
'''Lock a given filename so that a later WOPI lock call would detect a conflict.
Used for OnlyOffice as they do not use WOPI: this way better interoperability is ensured.
It creates a LibreOffice-compatible lock, which is checked by the WOPI lock call
as well as by LibreOffice.
Method: POST to create a lock, GET to query for it
Request arguments:
- string filename: the full path of the filename to be opened
    - string userid (optional): the user identity to create the file, defaults to '0:0' (i.e. root)
- string endpoint (optional): the storage endpoint to be used to look up the file or the storage id, in case of
multi-instance underlying storage; defaults to 'default'
'''
req = flask.request
# first check if the shared secret matches ours
if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret:
Wopi.log.warning('msg="cboxLock: unauthorized access attempt, missing authorization token" '
'client="%s"' % req.remote_addr)
return UNAUTHORIZED
filename = req.args['filename']
userid = req.args['userid'] if 'userid' in req.args else '0:0'
endpoint = req.args['endpoint'] if 'endpoint' in req.args else 'default'
return core.ioplocks.ioplock(filename, userid, endpoint, req.method == 'GET')
@Wopi.app.route("/wopi/cbox/unlock", methods=['POST'])
def cboxUnlock():
'''Unlock a given filename. Used for OnlyOffice as they do not use WOPI (see cboxLock).
Request arguments:
- string filename: the full path of the filename to be opened
    - string userid (optional): the user identity to create the file, defaults to '0:0' (i.e. root)
- string endpoint (optional): the storage endpoint to be used to look up the file or the storage id, in case of
multi-instance underlying storage; defaults to 'default'
The call returns:
- HTTP UNAUTHORIZED (401) if the 'Authorization: Bearer' secret is not provided in the header (cf. /wopi/cbox/open)
- HTTP CONFLICT (409) if a lock exists, but held by another application
- HTTP NOT_FOUND (404) if no lock was found for the given file
- HTTP INTERNAL_SERVER_ERROR (500) if some other I/O error occurred with the given lock file
- HTTP OK (200) if a lock for OnlyOffice existed. In this case it is removed.
'''
req = flask.request
# first check if the shared secret matches ours
if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret:
Wopi.log.warning('msg="cboxUnlock: unauthorized access attempt, missing authorization token" ' \
'client="%s"' % req.remote_addr)
return UNAUTHORIZED
filename = req.args['filename']
userid = req.args['userid'] if 'userid' in req.args else '0:0'
endpoint = req.args['endpoint'] if 'endpoint' in req.args else 'default'
return core.ioplocks.iopunlock(filename, userid, endpoint)
#
# Bridge functionality
#
def _guireturn(msg):
'''One-liner to better render messages that are visible in the UI'''
return '<div align="center" style="color:#808080; padding-top:50px; font-family:Verdana">%s</div>' % msg
@Wopi.app.route("/wopi/bridge/open", methods=["GET"])
def bridgeOpen():
'''The WOPI bridge open call'''
try:
wopisrc = url_unquote(flask.request.args['WOPISrc'])
acctok = flask.request.args['access_token']
Wopi.log.info('msg="BridgeOpen called" client="%s" user-agent="%s" token="%s"' %
(flask.request.remote_addr, flask.request.user_agent, acctok[-20:]))
appurl, _ = bridge.appopen(wopisrc, acctok)
# for now we know that the second member is {} as in Revaold we only redirect
return flask.redirect(appurl)
except KeyError as e:
Wopi.log.warning('msg="BridgeOpen: unable to open the file, missing WOPI context" error="%s"' % e)
return _guireturn('Missing arguments'), http.client.BAD_REQUEST
except bridge.FailedOpen as foe:
return _guireturn(foe.msg), foe.statuscode
@Wopi.app.route("/wopi/bridge/<docid>", methods=["POST"])
@Wopi.metrics.do_not_track()
def bridgeSave(docid):
'''The WOPI bridge save call'''
return bridge.appsave(docid)
@Wopi.app.route("/wopi/bridge/list", methods=["GET"])
def bridgeList():
'''Return a list of all currently opened files in bridge mode, for operators only'''
return bridge.applist()
#
# Deprecated cbox endpoints
#
@Wopi.app.route("/wopi/cbox/open", methods=['GET'])
@Wopi.metrics.do_not_track()
@Wopi.metrics.counter('open_by_ext', 'Number of /open calls by file extension',
labels={'open_type': lambda:
flask.request.args['filename'].split('.')[-1] \
if 'filename' in flask.request.args and '.' in flask.request.args['filename'] \
else ('noext' if 'filename' in flask.request.args else 'fileid')
})
def cboxOpen_deprecated():
'''Generates a WOPISrc target and an access token to be passed to a WOPI-compatible Office-like app
for accessing a given file for a given user.
Required headers:
- Authorization: a bearer shared secret to protect this call as it provides direct access to any user's file
Request arguments:
- int ruid, rgid: a real Unix user identity (id:group) representing the user accessing the file
- enum viewmode: how the user should access the file, according to utils.ViewMode/the CS3 app provider API
- OR bool canedit: True if full access should be given to the user, otherwise read-only access is granted
- string username (optional): user's full display name, typically shown by the Office app
- string filename: the full path of the filename to be opened
- string endpoint (optional): the storage endpoint to be used to look up the file or the storage id, in case of
multi-instance underlying storage; defaults to 'default'
- string folderurl (optional): the URL to come back to the containing folder for this file, typically shown by the Office app
Returns: a single string with the application URL, or a message and a 4xx/5xx HTTP code in case of errors
'''
Wopi.refreshconfig()
req = flask.request
# if running in https mode, first check if the shared secret matches ours
if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret:
Wopi.log.warning('msg="cboxOpen: unauthorized access attempt, missing authorization token" ' \
'client="%s" clientAuth="%s"' % (req.remote_addr, req.headers.get('Authorization')))
return UNAUTHORIZED
# now validate the user identity and deny root access
try:
userid = 'N/A'
ruid = int(req.args['ruid'])
rgid = int(req.args['rgid'])
userid = '%d:%d' % (ruid, rgid)
if ruid == 0 or rgid == 0:
raise ValueError
except ValueError:
Wopi.log.warning('msg="cboxOpen: invalid or missing user/token in request" client="%s" user="%s"' %
(req.remote_addr, userid))
return UNAUTHORIZED
filename = url_unquote(req.args.get('filename', ''))
if filename == '':
Wopi.log.warning('msg="cboxOpen: the filename must be provided" client="%s"' % req.remote_addr)
return 'Invalid argument', http.client.BAD_REQUEST
if 'viewmode' in req.args:
try:
viewmode = utils.ViewMode(req.args['viewmode'])
except ValueError:
Wopi.log.warning('msg="cboxOpen: invalid viewmode parameter" client="%s" viewmode="%s"' %
(req.remote_addr, req.args['viewmode']))
return 'Invalid argument', http.client.BAD_REQUEST
else:
# backwards compatibility
viewmode = utils.ViewMode.READ_WRITE if 'canedit' in req.args and req.args['canedit'].lower() == 'true' \
else utils.ViewMode.READ_ONLY
username = req.args.get('username', '')
folderurl = url_unquote(req.args.get('folderurl', '%2F')) # defaults to `/`
endpoint = req.args.get('endpoint', 'default')
try:
# here we set wopiuser = userid (i.e. uid:gid) as that's well known to be consistent over time
inode, acctok = utils.generateAccessToken(userid, filename, viewmode, (username, userid), \
folderurl, endpoint, ('', '', ''))
except IOError as e:
Wopi.log.warning('msg="cboxOpen: remote error on generating token" client="%s" user="%s" ' \
'friendlyname="%s" mode="%s" endpoint="%s" reason="%s"' %
(req.remote_addr, userid, username, viewmode, endpoint, e))
return 'Remote error, file or app not found or file is a directory', http.client.NOT_FOUND
if bridge.isextsupported(os.path.splitext(filename)[1][1:]):
# call the bridgeOpen right away, to not expose the WOPI URL to the user (it might be behind firewall)
try:
appurl, _ = bridge.appopen(utils.generateWopiSrc(inode), acctok)
Wopi.log.debug('msg="cboxOpen: returning bridged app" URL="%s"' % appurl[appurl.rfind('/'):])
return appurl[appurl.rfind('/'):] # return the payload as the appurl is already known via discovery
except bridge.FailedOpen as foe:
Wopi.log.warning('msg="cboxOpen: open via bridge failed" reason="%s"' % foe.msg)
return foe.msg, foe.statuscode
# generate the target for the app engine
return '%s&access_token=%s' % (utils.generateWopiSrc(inode), acctok)
@Wopi.app.route("/wopi/cbox/endpoints", methods=['GET'])
@Wopi.metrics.do_not_track()
def cboxAppEndPoints_deprecated():
'''Returns the office apps end-points registered with this WOPI server. This is used by the old Reva
to discover which Apps frontends can be used with this WOPI server. The new Reva/IOP
includes this logic in the AppProvider and AppRegistry, and once it's fully adopted this logic
will be removed from the WOPI server.
Note that if the end-points are relocated and the corresponding configuration entry updated,
the WOPI server must be restarted.'''
Wopi.log.info('msg="cboxEndPoints: returning all registered office apps end-points" client="%s" mimetypescount="%d"' %
(flask.request.remote_addr, len(core.discovery.endpoints)))
return flask.Response(json.dumps(core.discovery.endpoints), mimetype='application/json')
@Wopi.app.route("/wopi/cbox/download", methods=['GET'])
def cboxDownload_deprecated():
'''The deprecated endpoint for download'''
return iopDownload()
#
# Start the Flask endless listening loop
#
if __name__ == '__main__':
Wopi.init()
core.discovery.initappsregistry() # deprecated
Wopi.run()
|
[] |
[] |
[
"HOST_HOSTNAME"
] |
[]
|
["HOST_HOSTNAME"]
|
python
| 1 | 0 | |
server/routes/devices.py
|
#import requests
import json
import mariadb
import os
import sys
import logging
from dotenv import load_dotenv
class devices(object):
def __init__(self):
load_dotenv()
self.logger = logging.getLogger('prometeo.device.devices')
self.logger.debug('creating an instance of devices')
def insert_device(self, sensorid, model, version):
# Instantiate Connection
try:
conn = mariadb.connect(
user = os.getenv("MARIADB_USERNAME"),
password = os.getenv("MARIADB_PASSWORD"),
host = os.getenv("MARIADB_HOST"),
database = "prometeo",
port = int(os.getenv("MARIADB_PORT")))
cursor = conn.cursor()
cursor.callproc('sp_create_device', (sensorid, model, version))
data = cursor.fetchall()
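            # sp_create_device is assumed to return an empty string on success
            # and an error message otherwise, hence the length check below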
if len(data[0][0]) == 0:
                conn.commit()
return True
else:
return False
except mariadb.Error as e:
print("Error connecting to MariaDB Platform: {e}")
finally:
cursor.close()
conn.close()
def update_device(self, sensorid, model, version):
try:
conn = mariadb.connect(
user = os.getenv("MARIADB_USERNAME"),
password = os.getenv("MARIADB_PASSWORD"),
host = os.getenv("MARIADB_HOST"),
database = "prometeo",
port = int(os.getenv("MARIADB_PORT")))
cursor = conn.cursor()
cursor.callproc('sp_update_device', (sensorid, model, version))
data = cursor.fetchall()
if len(data[0][0]) == 0:
                conn.commit()
return True
else:
return False
except mariadb.Error as e:
print("Error connecting to MariaDB Platform: {e}")
except Exception as e:
return None
finally:
cursor.close()
conn.close()
def get_device(self, sensorid):
try:
conn = mariadb.connect(
user = os.getenv("MARIADB_USERNAME"),
password = os.getenv("MARIADB_PASSWORD"),
host = os.getenv("MARIADB_HOST"),
database = "prometeo",
port = int(os.getenv("MARIADB_PORT")))
cursor = conn.cursor()
            cursor.callproc('sp_select_device', (sensorid,))
data = cursor.fetchall()
if len(data) > 0:
return(data[0])
else:
return None
except mariadb.Error as e:
print("Error connecting to MariaDB Platform: {e}")
except Exception as e:
return None
finally:
cursor.close()
conn.close()
def get_alldevices(self):
print("get_alldevices - entro en la funcion")
try:
conn = mariadb.connect(
user = os.getenv("MARIADB_USERNAME"),
password = os.getenv("MARIADB_PASSWORD"),
host = os.getenv("MARIADB_HOST"),
database = "prometeo",
port = int(os.getenv("MARIADB_PORT")))
cursor = conn.cursor()
print("get_alldevices - llamada a sql")
cursor.callproc('sp_select_all_devices')
data = cursor.fetchall()
if len(data) > 0:
print("get_alldevices - Hay informacion")
for i in data:
print(i)
return(data)
else:
print("get_alldevices - NO HAY INFORMACION")
return None
except Exception as e:
print("get_alldevices - Estoy en la excepcion")
return None
finally:
cursor.close()
conn.close()
|
[] |
[] |
[
"MARIADB_PORT",
"MARIADB_USERNAME",
"MARIADB_PASSWORD",
"MARIADB_HOST"
] |
[]
|
["MARIADB_PORT", "MARIADB_USERNAME", "MARIADB_PASSWORD", "MARIADB_HOST"]
|
python
| 4 | 0 | |
src/main/java/com/delacruzhome/navytracker/repositories/TrainingRepositoryImpl.java
|
package com.delacruzhome.navytracker.repositories;
import java.util.ArrayList;
import java.util.List;
import com.delacruzhome.navytracker.factories.IFactory;
import com.delacruzhome.navytracker.factories.TrainingFactoryImpl;
import com.delacruzhome.navytracker.models.Training;
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.client.*;
import static com.mongodb.client.model.Filters.*;
import org.bson.BsonValue;
import org.bson.types.ObjectId;
public class TrainingRepositoryImpl extends RepositoryBase<Training> {
private static TrainingRepositoryImpl trainingRepository;
private static boolean initialized = false;
private TrainingRepositoryImpl(MongoClient client, IFactory<Training> trainingFactory) {
super(client, trainingFactory, "trainings");
}
private static void init() {
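        // The MongoURI environment variable is expected to hold a full MongoDB
        // connection string, e.g. "mongodb://localhost:27017" (illustrative value).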
String connString = System.getenv("MongoURI");
MongoClient client = MongoClients.create(
MongoClientSettings.builder()
.applyConnectionString(
new ConnectionString(connString))
.build()
);
IFactory<Training> trainingFactory = new TrainingFactoryImpl();
trainingRepository = new TrainingRepositoryImpl(client, trainingFactory);
}
public static synchronized TrainingRepositoryImpl getInstance() {
if (initialized) return trainingRepository;
init();
initialized = true;
return trainingRepository;
}
@Override
public List<Training> getAll() {
final MongoCursor<Training> cursor = collection.find()
.map(doc -> trainingFactory.create(doc)).cursor();
final List<Training> result = new ArrayList<>();
while (cursor.hasNext()) {
final Training t = cursor.next();
result.add(t);
}
return result == null ? new ArrayList<>() : result;
}
@Override
public Training findById(final ObjectId id) {
return collection.find(eq("_id", id))
.map(doc -> trainingFactory.create(doc)).first();
}
@Override
public Training add(Training training) {
BsonValue id = collection
.insertOne(trainingFactory.createDocument(training)).getInsertedId();
training.setId(id.asObjectId().getValue().toString());
return training;
}
@Override
public Training update(final Training training) throws IllegalArgumentException {
long count = collection.replaceOne(eq("_id", new ObjectId(training.getId())),
trainingFactory.createDocument(training)).getModifiedCount();
if (count < 1) {
throw new IllegalArgumentException("Could not modify, the id provided is invalid");
}
return training;
}
@Override
public void delete(final ObjectId id) throws IllegalArgumentException {
long count = collection.deleteOne(eq("_id", id)).getDeletedCount();
if (count < 1){
throw new IllegalArgumentException("Could not delete, the id provided is invalid");
}
}
}
|
[
"\"MongoURI\""
] |
[] |
[
"MongoURI"
] |
[]
|
["MongoURI"]
|
java
| 1 | 0 | |
internal/characters/hutao/hutao_test.go
|
package hutao
import (
"os"
"testing"
"github.com/genshinsim/gcsim/internal/tests"
"github.com/genshinsim/gcsim/pkg/core"
"github.com/genshinsim/gcsim/pkg/enemy"
"github.com/genshinsim/gcsim/pkg/player"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var logger *zap.SugaredLogger
func TestMain(m *testing.M) {
// call flag.Parse() here if TestMain uses flags
config := zap.NewDevelopmentConfig()
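// GCSIM_VERBOSE_TEST switches the test logger from info to debug level when set to any non-empty value.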
debug := os.Getenv("GCSIM_VERBOSE_TEST")
level := zapcore.InfoLevel
if debug != "" {
level = zapcore.DebugLevel
}
config.Level = zap.NewAtomicLevelAt(level)
config.EncoderConfig.TimeKey = ""
log, _ := config.Build(zap.AddCallerSkip(1))
logger = log.Sugar()
os.Exit(m.Run())
}
func TestBasicAbilUsage(t *testing.T) {
c, err := core.New(func(c *core.Core) error {
c.Log = logger
return nil
})
if err != nil {
t.Error(err)
t.FailNow()
}
prof := tests.CharProfile(core.Hutao, core.Pyro, 6)
x, err := NewChar(c, prof)
//cast it to *char so we can access private members
// this := x.(*char)
if err != nil {
t.Error(err)
t.FailNow()
}
c.Chars = append(c.Chars, x)
c.CharPos[prof.Base.Key] = 0
//add targets to test with
eProf := tests.EnemeyProfile()
c.Targets = append(c.Targets, player.New(0, c))
c.Targets = append(c.Targets, enemy.New(1, c, eProf))
p := make(map[string]int)
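// Each ability call returns its frame count; ticking the core that many times advances the simulation through the animation.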
var f int
f, _ = x.Skill(p)
for i := 0; i < f; i++ {
c.Tick()
}
f, _ = x.Burst(p)
for i := 0; i < f; i++ {
c.Tick()
}
//bunch of attacks
for j := 0; j < 10; j++ {
f, _ = x.Attack(p)
for i := 0; i < f; i++ {
c.Tick()
}
}
//charge attack
f, _ = x.ChargeAttack(p)
for i := 0; i < f; i++ {
c.Tick()
}
//tick a bunch of times after
for i := 0; i < 1200; i++ {
c.Tick()
}
}
|
[
"\"GCSIM_VERBOSE_TEST\""
] |
[] |
[
"GCSIM_VERBOSE_TEST"
] |
[]
|
["GCSIM_VERBOSE_TEST"]
|
go
| 1 | 0 | |
dallinger/utils.py
|
import functools
import io
import os
import random
import shutil
import string
import subprocess
import sys
import tempfile
import redis
from pkg_resources import get_distribution
from six.moves.urllib.parse import urlparse
from dallinger.config import get_config
def connect_to_redis(url=None):
"""Connect to Redis.
If a URL is supplied, it will be used, otherwise an environment variable
is checked before falling back to a default.
Since we are generally running on Heroku, and configuring SSL certificates
is challenging, we disable cert requirements on secure connections.
"""
redis_url = url or os.getenv("REDIS_URL", "redis://localhost:6379")
connection_args = {"url": redis_url}
if urlparse(redis_url).scheme == "rediss":
connection_args["ssl_cert_reqs"] = None
return redis.from_url(**connection_args)
def get_base_url():
config = get_config()
host = os.getenv("HOST", config.get("host"))
if "herokuapp.com" in host:
if host.startswith("https://"):
base_url = host
elif host.startswith("http://"):
base_url = host.replace("http://", "https://")
else:
base_url = "https://{}".format(host)
else:
# debug mode
base_port = config.get("base_port")
port = random.randrange(base_port, base_port + config.get("num_dynos_web"))
base_url = "http://{}:{}".format(host, port)
return base_url
def dallinger_package_path():
"""Return the absolute path of the root directory of the installed
Dallinger package:
>>> utils.dallinger_package_location()
'/Users/janedoe/projects/Dallinger3/dallinger'
"""
dist = get_distribution("dallinger")
src_base = os.path.join(dist.location, dist.project_name)
return src_base
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits):
"""Generate random id numbers."""
return "".join(random.choice(chars) for x in range(size))
def ensure_directory(path):
"""Create a matching path if it does not already exist"""
if not os.path.exists(path):
os.makedirs(path)
def run_command(cmd, out, ignore_errors=False):
"""We want to both send subprocess output to stdout or another file
descriptor as the subprocess runs, *and* capture the actual exception
message on errors. CalledProcessErrors do not reliably contain the
underlying exception in either the 'message' or 'out' attributes, so
we tee the stderr to a temporary file and if a CalledProcessError is
raised we read its contents to recover stderr
"""
tempdir = tempfile.mkdtemp()
output_file = os.path.join(tempdir, "stderr")
original_cmd = " ".join(cmd)
p = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
t = subprocess.Popen(["tee", output_file], stdin=p.stderr, stdout=out)
t.wait()
p.communicate()
p.stderr.close()
if p.returncode != 0 and not ignore_errors:
with open(output_file, "r") as output:
error = output.read()
message = 'Command: "{}": Error: "{}"'.format(
original_cmd, error.replace("\n", "")
)
shutil.rmtree(tempdir, ignore_errors=True)
raise CommandError(message)
shutil.rmtree(tempdir, ignore_errors=True)
return p.returncode
class CommandError(Exception):
"""Something went wrong executing a subprocess command"""
class GitError(Exception):
"""Something went wrong calling a Git command"""
class GitClient(object):
"""Minimal wrapper, mostly for mocking"""
def __init__(self, output=None):
self.encoding = None
if output is None:
self.out = sys.stdout
else:
self.out = output
def init(self, config=None):
self._run(["git", "init"])
if config is not None:
for k, v in config.items():
self._run(["git", "config", k, v])
def add(self, what):
self._run(["git", "add", what])
def commit(self, msg):
self._run(["git", "commit", "-m", '"{}"'.format(msg)])
def push(self, remote, branch):
cmd = ["git", "push", remote, branch]
self._run(cmd)
def clone(self, repository):
tempdir = tempfile.mkdtemp()
cmd = ["git", "clone", repository, tempdir]
self._run(cmd)
return tempdir
def _run(self, cmd):
self._log(cmd)
try:
run_command(cmd, self.out)
except CommandError as e:
raise GitError(str(e))
def _log(self, cmd):
msg = '{}: "{}"'.format(self.__class__.__name__, " ".join(cmd))
if self.encoding:
msg = msg.encode(self.encoding)
self.out.write(msg)
class ParticipationTime(object):
grace_period_seconds = 120
def __init__(self, participant, reference_time, config):
self.participant = participant
self.when = reference_time
self.allowed_hours = config.get("duration")
self.app_id = config.get("app_id", "unknown")
@property
def assignment_id(self):
return self.participant.assignment_id
@property
def allowed_minutes(self):
return self.allowed_hours * 60
@property
def allowed_seconds(self):
return self.allowed_hours * 60.0 * 60.0
@property
def active_seconds(self):
delta = self.when - self.participant.creation_time
return delta.total_seconds()
@property
def active_minutes(self):
return self.active_seconds / 60
@property
def excess_minutes(self):
return (self.active_seconds - self.allowed_seconds) / 60
@property
def is_overdue(self):
total_allowed_seconds = self.allowed_seconds + self.grace_period_seconds
return self.active_seconds > total_allowed_seconds
def wrap_subprocess_call(func, wrap_stdout=True):
@functools.wraps(func)
def wrapper(*popenargs, **kwargs):
out = kwargs.get("stdout", None)
err = kwargs.get("stderr", None)
replay_out = False
replay_err = False
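# If the current stdout/stderr has no real file descriptor (e.g. it was replaced by a test harness), hand the subprocess a temp file and replay its contents afterwards.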
if out is None and wrap_stdout:
try:
sys.stdout.fileno()
except io.UnsupportedOperation:
kwargs["stdout"] = tempfile.NamedTemporaryFile()
replay_out = True
if err is None:
try:
sys.stderr.fileno()
except io.UnsupportedOperation:
kwargs["stderr"] = tempfile.NamedTemporaryFile()
replay_err = True
try:
return func(*popenargs, **kwargs)
finally:
if replay_out:
kwargs["stdout"].seek(0)
sys.stdout.write(kwargs["stdout"].read())
if replay_err:
kwargs["stderr"].seek(0)
sys.stderr.write(kwargs["stderr"].read())
return wrapper
check_call = wrap_subprocess_call(subprocess.check_call)
call = wrap_subprocess_call(subprocess.call)
check_output = wrap_subprocess_call(subprocess.check_output, wrap_stdout=False)
|
[] |
[] |
[
"REDIS_URL",
"HOST"
] |
[]
|
["REDIS_URL", "HOST"]
|
python
| 2 | 0 | |
src/boogie/experimental/faas/example.py
|
import os
from django import setup
from django.core.handlers.wsgi import WSGIHandler
class FaaS:
"""
Base class for FaaS applications.
"""
@classmethod
def from_settings(cls, settings, **kwargs):
function = cls(settings, **kwargs)
function.init()
return function
def __init__(self, settings, methods=None):
self.settings = settings
self.wsgi = None
def init(self):
"""
Start Django project.
"""
os.environ["DJANGO_SETTINGS_MODULE"] = self.settings
setup()
self.wsgi = WSGIHandler()
def request(self, url, **kwargs):
request = self.make_request(url, **kwargs)
print(request)
self.wsgi()
#
# example
#
# function = FaaS.from_settings("tests.testproject.settings")
# print(function.request("?api/v1/"))
# print(function.request("?api/v1/"))
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
tests/xpath/01-list_leaflist/run.py
|
#!/usr/bin/env python
import sys
import os
import getopt
TESTNAME = "list-tc01"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "k", ["keepfiles"])
except getopt.GetoptError as e:
print(str(e))
sys.exit(127)
k = False
for o, a in opts:
if o in ["-k", "--keepfiles"]:
k = True
pythonpath = os.environ.get("PATH_TO_PYBIND_TEST_PYTHON") if \
os.environ.get('PATH_TO_PYBIND_TEST_PYTHON') is not None \
else sys.executable
pyangpath = os.environ.get('PYANGPATH') if os.environ.get('PYANGPATH') \
is not None else False
pyangbindpath = os.environ.get('PYANGBINDPATH') if \
os.environ.get('PYANGBINDPATH') is not None else False
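# PATH_TO_PYBIND_TEST_PYTHON, PYANGPATH and PYANGBINDPATH let CI point at specific interpreters and checkouts; the interpreter falls back to sys.executable.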
assert pyangpath is not False, "could not find path to pyang"
assert pyangbindpath is not False, "could not resolve pyangbind directory"
this_dir = os.path.dirname(os.path.realpath(__file__))
cmd = "%s " % pythonpath
cmd += "%s --plugindir %s/pyangbind/plugin" % (pyangpath, pyangbindpath)
cmd += " -f pybind -o %s/bindings.py" % this_dir
cmd += " -p %s" % this_dir
cmd += " --use-xpathhelper"
cmd += " %s/%s.yang" % (this_dir, TESTNAME)
print(cmd)
os.system(cmd)
from bindings import list_tc01 as ytest
yhelper = YANGPathHelper()
yobj = ytest(path_helper=yhelper)
t1_leaflist(yobj, yhelper)
t2_list(yobj, yhelper)
t3_leaflist_remove(yobj, yhelper)
t4_list_remove(yobj, yhelper)
t5_typedef_leaflist_add_del(yobj, yhelper)
t6_typedef_list_add(yobj, yhelper)
t7_leaflist_of_leafrefs(yobj, yhelper)
t8_standalone_leaflist_check(yobj, yhelper)
t9_get_list(yobj, yhelper)
if not k:
os.system("/bin/rm %s/bindings.py" % this_dir)
os.system("/bin/rm %s/bindings.pyc" % this_dir)
def t1_leaflist(yobj, tree):
for a in ["mackerel", "trout", "haddock", "flounder"]:
yobj.container.t1.append(a)
for tc in [("mackerel", True), ("haddock", True), ("minnow", False)]:
validref = False
try:
yobj.reference.t1_ptr = tc[0]
validref = True
except ValueError:
pass
assert validref == tc[1], "Reference was incorrectly set for a" + \
" leaflist (%s not in %s -> %s != %s)" % (tc[0],
str(yobj.container.t1), validref, tc[1])
for tc in [("flounder", "exists"), ("minnow", "does not exist")]:
validref = False
try:
yobj.reference.t1_ptr_noexist = tc[0]
validref = True
except ValueError:
pass
assert validref is True, "Reference was incorrectly set for a " + \
" leaflist with require_instance set to false " + \
"(%s threw error, but it %s)" % tc[1]
def t2_list(yobj, tree):
for o in ["kangaroo", "wallaby", "koala", "dingo"]:
yobj.container.t2.add(o)
for tc in [("kangaroo", True), ("koala", True), ("wombat", False)]:
validref = False
try:
yobj.reference.t2_ptr = tc[0]
validref = True
except ValueError:
pass
assert validref == tc[1], "Reference was incorrectly set for a list" + \
" (%s not in %s -> %s != %s)" % (tc[0], yobj.container.t2.keys(),
validref, tc[1])
def t3_leaflist_remove(yobj, tree):
for b in ["oatmeal-stout", "amber-ale", "pale-ale", "pils",
"ipa", "session-ipa"]:
yobj.container.t3.append(b)
for b in [("session-ipa", 1), ("amber-ale", 1), ("moose-drool", 0)]:
path = "/container/t3"
retr = tree.get(path)
passed = False
assert len(retr) == 1, \
"Looking up a leaf-list element returned multiple elements " + \
"erroneously (%s -> %d elements (%s))" % (b[0], len(retr), retr)
found = False
try:
v = retr[0].index(b[0])
found = 1
except ValueError:
found = 0
assert found == b[1], \
"When retrieving a leaf-list element, a known value was not in " + \
" the list (%s -> %s (%s))" % (b[0], b[1], retr[0])
rem = False
try:
yobj.container.t3.remove(b[0])
rem = True
except ValueError:
pass
assert rem == bool(b[1]), "Removal of a leaflist element did not " + \
"return expected result (%s -> %s != %s)" % (b[0], rem, bool(b[1]))
new_retr = tree.get(path)
found = False
try:
v = new_retr[0].index(b[0])
found = 1
except ValueError:
found = 0
assert found == 0, "An element was not correctly removed from the " + \
"leaf-list (%s -> %s [%s])" % (b[0], path, new_retr[0])
def t4_list_remove(yobj, tree):
for b in ["steam", "liberty", "california-lager", "porter", "ipa",
"foghorn"]:
yobj.container.t4.add(b)
for b in [("steam", 1), ("liberty", 1), ("pygmy-owl", 0)]:
path = "/container/t4[keyval=%s]" % b[0]
retr = tree.get(path)
assert len(retr) == b[1], \
"Retreive of a list element returned the wrong number of elements " + \
"(%s -> %d != %d)" % (b[0], len(retr), b[1])
rem = False
try:
yobj.container.t4.delete(b[0])
rem = True
except KeyError:
pass
assert rem == bool(b[1]), "Removal of a list element did not return " + \
"expected result (%s -> %s != %s)" % (b[0], rem, bool(b[1]))
new_retr = tree.get(path)
assert len(new_retr) == 0, "An element was not correctly removed from " + \
"the list (%s -> len(%s) = %d)" % (b[0], path, len(new_retr))
def t5_typedef_leaflist_add_del(yobj, tree=False):
for a in ["quebec-city", "montreal", "laval", "gatineau"]:
yobj.container.t5.append(a)
for tc in [("quebec-city", True), ("montreal", True), ("dallas", False)]:
validref = False
try:
yobj.reference.t5_ptr = tc[0]
validref = True
except ValueError:
pass
assert validref == tc[1], "Reference was incorrectly set for a " + \
" leaflist (%s not in %s -> %s != %s)" % (tc[0],
str(yobj.container.t5), validref, tc[1])
for a in ["vancouver", "burnaby", "surrey", "richmond"]:
yobj.container.t5.append(a)
for tc in [("vancouver", True), ("burnaby", True), ("san-francisco", False),
("surrey", True), ("richmond", True)]:
path = "/container/t5"
retr = tree.get(path)
assert (tc[0] in retr[0]) == tc[1], "Retrieve of a leaf-list element " + \
"did not return expected result (%s->%s %s)" % (tc[0], tc[1],
(retr[0]))
rem = False
try:
retr[0].remove(tc[0])
rem = True
except ValueError:
pass
new_retr = tree.get(path)
assert rem == bool(tc[1]), "An element was not correctly removed from " + \
"a leaf-list (%s -> len(%s) = %d)" % (tc[0], path, len(new_retr))
for tc in [("gatineau", True), ("laval", True), ("new-york-city", False),
("quebec-city", True)]:
path = "/container/t5"
retr = tree.get(path)
assert (tc[0] in retr[0]) == tc[1], "Retrieve of a leaf-list element " + \
"did not return expected result (%s->%s %s)" % (tc[0], tc[1],
(retr[0]))
popped_obj = retr[0].pop()
if popped_obj == tc[0]:
expected_obj = True
else:
expected_obj = False
assert expected_obj == bool(tc[1]), "Popped object was not the " + \
"object that was expected (%s != %s)" % (tc[0], popped_obj)
new_retr = tree.get(path)
assert (tc[0] in new_retr[0]) == False, "Retrieve of a leaf-list " + \
"element did not return expected result (%s->%s %s)" % (tc[0], tc[1],
(new_retr[0]))
def t6_typedef_list_add(yobj, tree):
for o in ["la-ciboire", "la-chipie", "la-joufflue", "la-matante"]:
yobj.container.t6.add(o)
for tc in [("la-ciboire", True), ("la-matante", True), ("heiniken", False)]:
validref = False
try:
yobj.reference.t6_ptr = tc[0]
validref = True
except ValueError:
pass
assert validref == tc[1], "Reference was incorrectly set for a list" + \
" (%s not in %s -> %s != %s)" % (tc[0], yobj.container.t6.keys(),
validref, tc[1])
def t7_leaflist_of_leafrefs(yobj, tree):
test_list = [("snapshot", True), ("ranger", True), ("trout-slayer", False)]
for b in test_list:
if b[1]:
yobj.container.t7.append(b[0])
for b in test_list:
passed = False
try:
yobj.reference.t7_ptr.append(b[0])
passed = True
except:
pass
assert passed == b[1], "A reference to a leaf-list of leafrefs " + \
"was not correctly set (%s -> %s, expected %s)" % (b[0], passed, b[1])
def t8_standalone_leaflist_check(yobj, tree):
yobj.standalone.ll.append(1)
x = tree.get("/standalone/ll")
assert x[0][0] == 1, "leaf-list was not as expected"
yobj.standalone.l.add(1)
x = tree.get("/standalone/l")
assert x[0].x == 1, "list key was not as expected"
yobj.standalone.ref = 1
assert yobj.standalone.ref._referenced_object == 1, "reference was not correct"
def t9_get_list(yobj, tree):
l = tree.get_list("/standalone/l")
assert l._yang_name == "l", "Did not retrieve correct attribute for list"
assert l._is_container == "list", "Did not retrieve a list for the list"
if __name__ == '__main__':
from pyangbind.lib.xpathhelper import YANGPathHelper, XPathError
main()
|
[] |
[] |
[
"PATH_TO_PYBIND_TEST_PYTHON",
"PYANGPATH",
"PYANGBINDPATH"
] |
[]
|
["PATH_TO_PYBIND_TEST_PYTHON", "PYANGPATH", "PYANGBINDPATH"]
|
python
| 3 | 0 | |
restorm/tests/__init__.py
|
# This file mainly exists to allow python setup.py test to work.
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'restorm.tests.django_settings'
test_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, test_dir)
from django.test.utils import get_runner
from django.conf import settings
def runtests():
test_runner = get_runner(settings)
failures = test_runner([], verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
src/main.go
|
package main
import (
"context"
"os"
"encoding/json"
"fmt"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type BuildDetail struct {
Status string `json:"build-status"`
Name string `json:"project-name"`
}
// HandleRequest handles the change for a given CodeBuild alarm
func HandleRequest(ctx context.Context, event events.CloudWatchEvent) error {
buildDetail := BuildDetail{}
if err := json.Unmarshal(event.Detail, &buildDetail); err != nil {
return fmt.Errorf("Unable to unmarshal event details %w", err)
}
bucket := os.Getenv("BUCKET")
key := os.Getenv("KEY")
if len(bucket) <= 0 {
return fmt.Errorf("Unable to update project %s with a status of %s, Please set BUCKET environment variable", buildDetail.Name, buildDetail.Status)
}
source := fmt.Sprintf(`%s/%s/%s.svg`,bucket, key, buildDetail.Status)
destination := fmt.Sprintf(`%s/%s/STATUS.svg`, key, buildDetail.Name)
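// Copy the status-specific badge over the project's STATUS.svg so the badge URL for a project stays stable.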
sess, sessErr := session.NewSession()
if sessErr != nil {
return fmt.Errorf("Unable to create a session %w", sessErr)
}
svc := s3.New(sess)
// copy the object
_, copyErr := svc.CopyObject(&s3.CopyObjectInput{Bucket: aws.String(bucket), CopySource: aws.String(source), Key: aws.String(destination)})
if copyErr != nil {
return fmt.Errorf("Error copying status from %s to %s %w", source, destination, copyErr)
}
// Wait to see if the item got copied
copyWaitErr := svc.WaitUntilObjectExists(&s3.HeadObjectInput{Bucket: aws.String(bucket), Key: aws.String(destination)})
if copyWaitErr != nil {
return fmt.Errorf("Error waiting for item %s to be copied to %s %w", source, destination, copyWaitErr)
}
return nil
}
func main() {
lambda.Start(HandleRequest)
}
|
[
"\"BUCKET\"",
"\"KEY\""
] |
[] |
[
"KEY",
"BUCKET"
] |
[]
|
["KEY", "BUCKET"]
|
go
| 2 | 0 | |
tests/unit/test_databroker.py
|
import json
import os
import unittest
from pathlib import Path
from buildpack.databroker import connect, streams
from buildpack.databroker.config_generator.scripts.configloader import (
configinitializer,
)
from buildpack.databroker.config_generator.scripts.utils import write_file
from buildpack.databroker.config_generator.scripts.generators import (
debezium as debezium_generator,
)
# Constants
TEST_METADATA_FILE_PATH = "/tmp/metadata.json"
TEST_DEPENDENCIES_FILE_PATH = "/tmp/dependencies.json"
TEST_BROKER_URL = "localhost:9092"
LOCAL_DATABROKER_FOLDER = "{}/.local/databroker".format(os.getcwd())
KAFKA_CONNECT_DIR = "{}/kafka-connect".format(LOCAL_DATABROKER_FOLDER)
KAFKA_CONNECT_CFG_NAME = "connect.properties"
KAFKA_CONNECT_CFG_PATH = "{}/{}".format(
KAFKA_CONNECT_DIR, KAFKA_CONNECT_CFG_NAME
)
LOG4J_DEBEZIUM_CFG_PATH = "{}/{}".format(
KAFKA_CONNECT_DIR, "debezium-log4j.properties"
)
STREAM_SIDECAR_DIR = "{}/producer-streams/stream-sidecar-{}".format(
LOCAL_DATABROKER_FOLDER, streams.get_pdr_stream_version()
)
STREAM_TOPOLOGY_CFG_NAME = "topology.conf"
STREAM_TOPOLOGY_CFG_PATH = "{}/{}".format(
STREAM_SIDECAR_DIR, STREAM_TOPOLOGY_CFG_NAME
)
STREAM_AZKARRA_CFG_NAME = "azkarra.conf"
STREAM_AZKARRA_CFG_PATH = "{}/{}".format(
STREAM_SIDECAR_DIR, STREAM_AZKARRA_CFG_NAME
)
class TestDataBrokerConfigs(unittest.TestCase):
complete_producer_conf = None
# define metadata config
metadata_config = """
{
"Constants": [
{
"Name": "MyFirstModule.Kafka_broker_url",
"Type": "String",
"Description": "",
"DefaultValue": "localhost:9092"
},
{
"Name": "Atlas_UI_Resources.Atlas_UI_Resources_Version",
"Type": "String",
"Description": "",
"DefaultValue": " 2.5.4"
}
],
"ScheduledEvents": [],
"DataBrokerConfiguration": {
"publishedServices": [
{
"brokerUrl": "MyFirstModule.Kafka_broker_url",
"entities": [
{
"originalEntityName": "MyFirstModule.company",
"publicEntityName": "MyFirstModule.company",
"topicName": "bde821e1-f8cf-43c3-9c49-8af49bebb084.16747dc6-b6b7-42ae-aabf-255dca2aeeaf.56f74de7-32c5-48c9-8157-7df3670896db.1_0_0",
"attributeMapping": {
"INT_CompanyName": "CompanyName",
"INT_CompanyId": "CompanyId",
"INT_CompanyAddress": "INT_CompanyAddress"
}
},
{
"originalEntityName": "MyFirstModule.project",
"publicEntityName": "MyFirstModule.projectPublic",
"topicName": "bde821ec-f8cf-43cc-9c4c-8af49bebb08c.16747dcc-b6bc-42ac-aabc-255dca2aeeac.56f74dec-32cc-48cc-8157-7df3670896dc.1_0_0",
"attributeMapping": {
"INT_ProjectName": "ProjectName",
"INT_ProjectId": "ProjectId",
"INT_ProjectAddress": "INT_ProjectAddress"
}
}
]
},
{
"brokerUrl": "MyFirstModule.Kafka_broker_url",
"entities": [
{
"originalEntityName": "MyFirstModule.companyint",
"publicEntityName": "MyFirstModule.companypub",
"topicName": "bde821ed-f8cd-43c3-9c4d-8af49bebb08d.16747dcd-b6bd-42ad-aabd-255dca2aeead.56f74ded-32cd-48cd-815d-7df3670896dd.1_0_0",
"attributeMapping": {
"INT_CompanyPubName": "CompanyPubName",
"INT_CompanyPubId": "CompanyPubId",
"INT_CompanyPubAddress": "INT_CompanyPubAddress"
}
},
{
"originalEntityName": "MyFirstModule.member",
"publicEntityName": "MyFirstModule.memberpub",
"topicName": "bde821ee-f8ce-43ce-9c4e-8af49bebb08e.16747dce-b6be-42ae-aabe-255dca2aeeae.56f74dee-32ce-48ce-815e-7df3670896de.1_0_0",
"attributeMapping": {
"INT_MemberPubName": "MemberPubName",
"INT_MemberPubId": "MemberPubId",
"INT_MemberPubAddress": "INT_MemberPubAddress"
}
}
]
}
]
}
}
"""
# define dependencies config
dependencies_config = """
{
"schemaVersion": "0.2",
"appName": "Simple-Producer-App",
"published": [],
"consumed": []
}
"""
def setUp(self):
# transform string to file mode
write_file(TEST_METADATA_FILE_PATH, self.metadata_config)
write_file(TEST_DEPENDENCIES_FILE_PATH, self.dependencies_config)
# define environment variables
os.environ["MXRUNTIME_DatabaseType"] = "PostgreSQL"
os.environ["MXRUNTIME_DatabaseHost"] = "localhost:5432"
os.environ["MXRUNTIME_DatabaseUserName"] = "mx-app"
os.environ["MXRUNTIME_DatabaseName"] = "mendix"
os.environ["MXRUNTIME_DatabasePassword"] = "mx-app-password"
# environment variable will overwrite the default constant value
os.environ["MX_MyFirstModule.Kafka_broker_url"] = TEST_BROKER_URL
metadata_file = open(TEST_METADATA_FILE_PATH, "rt")
dependencies_file = open(TEST_DEPENDENCIES_FILE_PATH, "rt")
database_config = {}
self.complete_producer_conf = configinitializer.unify_configs(
[metadata_file, dependencies_file], database_config
)
metadata_file.close()
dependencies_file.close()
def tearDown(self):
os.unlink(TEST_METADATA_FILE_PATH)
os.unlink(TEST_DEPENDENCIES_FILE_PATH)
def _check_folder_exist(self, folder_path):
os.makedirs(folder_path, exist_ok=True)
def test_kafka_connect_config(self):
self._check_folder_exist(KAFKA_CONNECT_DIR)
# check config has been generated
connect.setup_configs(self.complete_producer_conf)
assert os.path.isfile(KAFKA_CONNECT_CFG_PATH)
actual_config = {}
with open(KAFKA_CONNECT_CFG_PATH, "r") as f:
for line in f.readlines():
tmp_line = line.strip().split("=")
actual_config[tmp_line[0]] = tmp_line[1]
assert actual_config["bootstrap.servers"] == os.environ.get(
"MX_MyFirstModule.Kafka_broker_url"
)
# verify postgres whitelists
debezium_config = json.loads(
debezium_generator.generate_config(self.complete_producer_conf)
)
assert (
debezium_config["config"]["table.whitelist"]
== ".*MyFirstModule.company.*,.*MyFirstModule.project.*"
)
assert (
debezium_config["config"]["column.whitelist"]
== ".*MyFirstModule.company.*,MyFirstModule.company.id,.*MyFirstModule.project.*,MyFirstModule.project.id"
)
def test_streams_override(self):
os.environ["DATABROKER_STREAMS_VERSION"] = "0.99999"
assert streams.get_pdr_stream_version() == "0.99999"
del os.environ["DATABROKER_STREAMS_VERSION"] # reset
# default
assert streams.get_pdr_stream_version() == "0.23.0-9"
# There are two configs for streams: one is topology.conf, the other azkarra.conf.
# Make sure specific fields are replaced with the correct values based on the template file.
def test_stream_config(self):
self._check_folder_exist(STREAM_SIDECAR_DIR)
streams.setup_configs(self.complete_producer_conf)
# verify topology config
assert os.path.isfile(STREAM_TOPOLOGY_CFG_PATH)
expect_metadata_config = json.loads(self.metadata_config)
with open(STREAM_TOPOLOGY_CFG_PATH, "r") as f:
actual_config = json.loads(f.read())
assert actual_config["topologies"][0][
"name"
] == "{} topology".format(
expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["publicEntityName"]
)
assert (
actual_config["topologies"][0]["source"]
== "mendix.public.myfirstmodule_company.private"
)
assert (
actual_config["topologies"][0]["sink"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["topicName"]
)
assert (
actual_config["topologies"][0]["originalEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["originalEntityName"]
)
assert (
actual_config["topologies"][0]["publicEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["publicEntityName"]
)
assert (
actual_config["topologies"][0]["attributeMapping"][
"INT_CompanyName"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["attributeMapping"]["INT_CompanyName"]
)
assert (
actual_config["topologies"][0]["attributeMapping"][
"INT_CompanyId"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["attributeMapping"]["INT_CompanyId"]
)
assert (
actual_config["topologies"][0]["attributeMapping"][
"INT_CompanyAddress"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][0]["attributeMapping"]["INT_CompanyAddress"]
)
assert (
actual_config["topologies"][1]["originalEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][1]["originalEntityName"]
)
assert (
actual_config["topologies"][1]["publicEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][1]["publicEntityName"]
)
assert (
actual_config["topologies"][1]["attributeMapping"][
"INT_ProjectName"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][1]["attributeMapping"]["INT_ProjectName"]
)
assert (
actual_config["topologies"][1]["attributeMapping"][
"INT_ProjectId"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][1]["attributeMapping"]["INT_ProjectId"]
)
assert (
actual_config["topologies"][1]["attributeMapping"][
"INT_ProjectAddress"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][0]["entities"][1]["attributeMapping"]["INT_ProjectAddress"]
)
assert (
actual_config["topologies"][2]["originalEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][0]["originalEntityName"]
)
assert (
actual_config["topologies"][2]["publicEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][0]["publicEntityName"]
)
assert (
actual_config["topologies"][2]["attributeMapping"][
"INT_CompanyPubName"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][0]["attributeMapping"]["INT_CompanyPubName"]
)
assert (
actual_config["topologies"][2]["attributeMapping"][
"INT_CompanyPubId"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][0]["attributeMapping"]["INT_CompanyPubId"]
)
assert (
actual_config["topologies"][2]["attributeMapping"][
"INT_CompanyPubAddress"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][0]["attributeMapping"][
"INT_CompanyPubAddress"
]
)
assert (
actual_config["topologies"][3]["originalEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][1]["originalEntityName"]
)
assert (
actual_config["topologies"][3]["publicEntityName"]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][1]["publicEntityName"]
)
assert (
actual_config["topologies"][3]["attributeMapping"][
"INT_MemberPubName"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][1]["attributeMapping"]["INT_MemberPubName"]
)
assert (
actual_config["topologies"][3]["attributeMapping"][
"INT_MemberPubId"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][1]["attributeMapping"]["INT_MemberPubId"]
)
assert (
actual_config["topologies"][3]["attributeMapping"][
"INT_MemberPubAddress"
]
== expect_metadata_config["DataBrokerConfiguration"][
"publishedServices"
][1]["entities"][1]["attributeMapping"]["INT_MemberPubAddress"]
)
# verify azkarra config
assert os.path.isfile(STREAM_AZKARRA_CFG_PATH)
with open(STREAM_AZKARRA_CFG_PATH, "r") as f:
actual_config = f.read()
assert (
str(actual_config).find(
'bootstrap.servers = "{}"'.format(
os.environ.get("MX_MyFirstModule.Kafka_broker_url")
)
)
> 1
)
# verify log4j configuration
assert os.path.isfile(LOG4J_DEBEZIUM_CFG_PATH)
assert (
Path(LOG4J_DEBEZIUM_CFG_PATH)
.read_text()
.find("log4j.rootLogger=INFO, stdout")
> -1
)
|
[] |
[] |
[
"MXRUNTIME_DatabasePassword",
"MXRUNTIME_DatabaseUserName",
"MX_MyFirstModule.Kafka_broker_url",
"MX_MyFirstModule.Kafka_broker_url\"\n ",
"DATABROKER_STREAMS_VERSION",
"MXRUNTIME_DatabaseName",
"MXRUNTIME_DatabaseType",
"MXRUNTIME_DatabaseHost"
] |
[]
|
["MXRUNTIME_DatabasePassword", "MXRUNTIME_DatabaseUserName", "MX_MyFirstModule.Kafka_broker_url", "MX_MyFirstModule.Kafka_broker_url\"\n ", "DATABROKER_STREAMS_VERSION", "MXRUNTIME_DatabaseName", "MXRUNTIME_DatabaseType", "MXRUNTIME_DatabaseHost"]
|
python
| 8 | 0 | |
manager/controllers/motion/motion_controllers.go
|
// Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package motion
import (
"os"
"emperror.dev/errors"
motionv1 "fybrik.io/fybrik/manager/apis/motion/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// This function sets up all motion controllers including the webhooks given a controller manager.
// Webhooks can be activated/deactivated using the ENABLE_WEBHOOKS environment variable.
// This currently includes:
// - a manager for BatchTransfers
// - a manager for StreamTransfers
func SetupMotionControllers(mgr manager.Manager) error {
if err := NewBatchTransferReconciler(mgr, "BatchTransferController").SetupWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create BatchTransfer controller")
}
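// Webhooks are on by default and can be disabled by setting ENABLE_WEBHOOKS to "false" (e.g. for local runs without cert infrastructure).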
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err := (&motionv1.BatchTransfer{}).SetupWebhookWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create BatchTransfer webhook")
}
}
if err := NewStreamTransferReconciler(mgr, "StreamTransferController").SetupWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create StreamTransfer controller")
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err := (&motionv1.StreamTransfer{}).SetupWebhookWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create StreamTransfer webhook")
}
}
return nil
}
|
[
"\"ENABLE_WEBHOOKS\"",
"\"ENABLE_WEBHOOKS\""
] |
[] |
[
"ENABLE_WEBHOOKS"
] |
[]
|
["ENABLE_WEBHOOKS"]
|
go
| 1 | 0 | |
jupyter_notebook_config.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from IPython.lib import passwd
c.NotebookApp.ip = '*'
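# PORT is injected by the hosting platform; fall back to Jupyter's default 8888.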
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python3'
# sets a password if PASSWORD is set in the environment
if 'PASSWORD' in os.environ:
password = os.environ['PASSWORD']
if password:
c.NotebookApp.password = passwd(password)
else:
c.NotebookApp.password = ''
c.NotebookApp.token = ''
del os.environ['PASSWORD']
|
[] |
[] |
[
"PORT",
"PASSWORD"
] |
[]
|
["PORT", "PASSWORD"]
|
python
| 2 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hovii_34298.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
yago/main.go
|
package main
import (
"errors"
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/spf13/cobra"
)
func GenDir(srcPath string, destPath, app string) error {
if srcInfo, err := os.Stat(srcPath); err != nil {
return err
} else {
if !srcInfo.IsDir() {
return errors.New("src path is not a correct directory!")
}
}
if destInfo, err := os.Stat(destPath); err != nil {
return err
} else {
if !destInfo.IsDir() {
return errors.New("dest path is not a correct directory!")
}
}
err := filepath.Walk(srcPath, func(path string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if !f.IsDir() {
//path := strings.Replace(path, "\\", "/", -1)
destNewPath := strings.Replace(path, srcPath, destPath, -1)
if err := GenFile(path, destNewPath, app); err != nil {
log.Println(fmt.Sprintf("create file %s error:", destNewPath), err.Error())
return err
}
}
return nil
})
return err
}
func GenFile(src, dest, app string) (err error) {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
destSplitPathDirs := strings.Split(dest, string(filepath.Separator))
destSplitPath := ""
for index, dir := range destSplitPathDirs {
if index < len(destSplitPathDirs)-1 {
destSplitPath = filepath.Join(destSplitPath, dir)
b, _ := pathExists(destSplitPath)
if b == false {
err := os.Mkdir(destSplitPath, os.ModePerm)
if err != nil {
return err
}
}
}
}
dstFile, err := os.Create(dest)
if err != nil {
return err
}
defer dstFile.Close()
srcFileInfo, err := srcFile.Stat()
if err != nil {
return err
}
content := make([]byte, srcFileInfo.Size())
if _, err := srcFile.Read(content); err != nil {
return err
}
contentStr := strings.ReplaceAll(string(content), "github.com/goees/yago/example", app)
if _, err := dstFile.WriteString(contentStr); err != nil {
return err
}
return nil
}
func pathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func getCurDir() string {
dir, _ := filepath.Abs(filepath.Dir("."))
return filepath.Clean(dir)
//return strings.Replace(dir, "\\", "/", -1)
}
func getGoPath() string {
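// Prefer the GOPATH environment variable; fall back to the toolchain default.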
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
var initCmd = &cobra.Command{
Use: "init",
Short: "Init app",
Long: `Init a app named by input`,
Run: func(cmd *cobra.Command, args []string) {
useMod, _ := cmd.Flags().GetBool("mod")
app, _ := cmd.Flags().GetString("app")
log.Println("create app", app)
if err := os.MkdirAll(app, 0755); err != nil {
log.Println("create app dir error:", err.Error())
}
var src string
if useMod {
src = filepath.Join(getGoPath(), "pkg", "mod", "github.com", "goees", "yago@"+Version, "example")
} else {
src = filepath.Join(getGoPath(), "src", "github.com", "goees", "yago", "example")
}
dest := app
if err := GenDir(src, dest, app); err != nil {
log.Println("create app error:", err.Error())
}
},
}
var newCmd = &cobra.Command{
Use: "new",
Short: "New module",
Long: `New a module named by input`,
Run: func(cmd *cobra.Command, args []string) {
if exist, _ := pathExists("go.mod"); !exist {
log.Println("current directory is not a go mod root path")
return
}
curDir := strings.Split(getCurDir(), string(filepath.Separator))
app := curDir[len(curDir)-1]
module, _ := cmd.Flags().GetString("module")
advance, _ := cmd.Flags().GetBool("advance")
log.Println("create module", module)
dirs := []string{"cmd", "dao", "http", "model", "rpc", "task"}
if advance {
dirs = append(dirs, "service")
}
for _, d := range dirs {
//dirPath := fmt.Sprintf("app/modules/%s/%s%s", module, module, d)
dirPath := filepath.Join("app", "modules", module, module+d)
if err := os.MkdirAll(dirPath, 0755); err != nil {
log.Println(fmt.Sprintf("create module dir %s error:", dirPath), err.Error())
return
}
//filePath := fmt.Sprintf("%s/%s.go", dirPath, module)
filePath := filepath.Join(dirPath, module+".go")
fileBody := fmt.Sprintf("package %s%s", module, d)
if err := ioutil.WriteFile(filePath, []byte(fileBody), 0644); err != nil {
log.Println(fmt.Sprintf("create module file %s error:", filePath), err.Error())
return
}
}
//routePath := "app/route/route.go"
routePath := filepath.Join("app", "route", "route.go")
routes := []string{"cmd", "http", "rpc", "task"}
for _, d := range routes {
// routePath := fmt.Sprintf("app/routes/%sroute/%s.go", d, d)
var routeBody []byte
var err error
if routeBody, err = ioutil.ReadFile(routePath); err != nil {
log.Println(fmt.Sprintf("read route file %s error:", routePath), err.Error())
return
}
newRoute := fmt.Sprintf("\t_ \"%s/app/modules/%s/%s%s\"\n)", app, module, module, d)
contentStr := strings.ReplaceAll(string(routeBody), ")", newRoute)
if err = ioutil.WriteFile(routePath, []byte(contentStr), 0644); err != nil {
log.Println(fmt.Sprintf("write route file %s error:", routePath), err.Error())
return
}
cmd := exec.Command("gofmt", "-w", routePath)
if err := cmd.Run(); err != nil {
log.Println(fmt.Sprintf("gofmt route file %s error:", routePath), err.Error())
return
}
}
},
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print version",
Long: `Print version`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("yago version", Version)
},
}
func camelString(s string) (string, error) {
s = strings.Trim(s, "_")
r, _ := regexp.Compile("^[a-zA-Z][a-zA-Z_0-9]*[a-zA-Z0-9]$")
if !r.MatchString(s) {
return "", errors.New("only support a-z A-Z _")
}
ss := strings.Split(s, "_")
for k, v := range ss {
ss[k] = strings.Title(v)
}
return strings.Join(ss, ""), nil
}
func genFileByTemplate(filename, template string) {
pkgName := strings.ToLower(filepath.Base(getCurDir()))
name, err := camelString(filename)
if err != nil {
log.Fatalln(err)
}
if len(name) < 2 {
log.Fatalln("the filename length is greater than at least 2")
}
lname := fmt.Sprintf("%s%s", strings.ToLower(name[0:1]), name[1:])
r := strings.NewReplacer("{{PACKAGE}}", pkgName, "{{NAME}}", name, "{{LNAME}}", lname)
content := r.Replace(template)
filePath := filepath.Join(getCurDir(), filename+".go")
if err := ioutil.WriteFile(filePath, []byte(content), 0644); err != nil {
log.Println(fmt.Sprintf("create file %s error:", filePath), err.Error())
return
} else {
log.Println(fmt.Sprintf("create file %s succ", filePath))
}
}
var genCmd = &cobra.Command{
Use: "gen",
Short: "auto generate file",
Long: `auto generate http, rpc, task, cmd, service, model file`,
Run: func(cmd *cobra.Command, args []string) {
a, err := cmd.Flags().GetString("http")
if err != nil {
log.Fatalln(err)
}
if len(a) > 0 {
genFileByTemplate(a, HttpTemplate)
return
}
r, err := cmd.Flags().GetString("rpc")
if err != nil {
log.Fatalln(err)
}
if len(r) > 0 {
genFileByTemplate(r, RpcTemplate)
return
}
c, err := cmd.Flags().GetString("cmd")
if err != nil {
log.Fatalln(err)
}
if len(c) > 0 {
genFileByTemplate(c, CmdTemplate)
return
}
t, err := cmd.Flags().GetString("task")
if err != nil {
log.Fatalln(err)
}
if len(t) > 0 {
genFileByTemplate(t, TaskTemplate)
return
}
s, err := cmd.Flags().GetString("service")
if err != nil {
log.Fatalln(err)
}
if len(s) > 0 {
genFileByTemplate(s, ServiceTemplate)
return
}
m, err := cmd.Flags().GetString("model")
if err != nil {
log.Fatalln(err)
}
if len(m) > 0 {
genFileByTemplate(m, ModelTemplate)
return
}
},
}
var (
lastUpdateTime = time.Now().Unix()
state sync.Mutex
cmd *exec.Cmd
)
var runCmd = &cobra.Command{
Use: "run",
Short: "Hot run app",
Long: "Hot build and run app",
Run: func(cmd *cobra.Command, args []string) {
pwd, _ := os.Getwd()
appName := filepath.Base(pwd)
files := make([]string, 0)
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatalln(err)
}
defer watcher.Close()
go func() {
for {
select {
case event := <-watcher.Events:
if !strings.HasSuffix(event.Name, ".go") {
continue
}
now := time.Now().Unix()
// debounce: do not rebuild more than once every 3 seconds
if now-lastUpdateTime < 3 {
continue
}
lastUpdateTime = now
autoBuildApp(appName)
// change file
if event.Op&fsnotify.Write == fsnotify.Write {
log.Println("[INFO] modified file: ", event.Name, " - ", event.String())
}
case err := <-watcher.Errors:
log.Fatalln("[FATAL] watch error:", err)
}
}
}()
// periodically refresh the list of watched files
go func() {
for {
err := readDir(pwd, &files)
if err != nil {
log.Fatalln("[FATAL] read dir err", err)
}
for _, f := range files {
watcher.Add(f)
}
time.Sleep(30 * time.Second)
}
}()
// build and start once on startup
autoBuildApp(appName)
select {}
},
}
func autoBuildApp(appName string) {
state.Lock()
defer state.Unlock()
log.Println("[INFO] rebuild app start ...")
if runtime.GOOS == "windows" {
appName += ".exe"
}
bcmd := exec.Command("go", "build", "-o", appName)
bcmd.Stderr = os.Stderr
bcmd.Stdout = os.Stdout
err := bcmd.Run()
if err != nil {
log.Println("[ERROR] rebuild app error: ", err)
return
}
restartApp(appName)
log.Println("[INFO] rebuild app success.")
}
func restartApp(appName string) {
log.Println("[INFO] restart app ", appName, "...")
defer func() {
if e := recover(); e != nil {
log.Println("[ERROR] restart app error: ", e)
}
}()
// kill the previously running app
if cmd != nil && cmd.Process != nil {
err := cmd.Process.Kill()
if err != nil {
log.Fatalln("[FATAL] stop app error: ", err)
return
}
}
// start the newly built app
go func() {
cmd = exec.Command("./" + appName)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
go cmd.Run()
//go func() {
// err := cmd.Run()
// if err != nil && err.Error() != "signal: killed" {
// log.Fatalln("[FATAL] start app error:", err)
// }
//}()
}()
}
func readDir(dir string, files *[]string) error {
err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {
if f == nil {
return err
}
if strings.HasPrefix(f.Name(), ".") {
return nil
}
if f.IsDir() {
return nil
}
// only collect files ending in .go
if !strings.HasSuffix(f.Name(), ".go") {
return nil
}
//fmt.Println(path, f.Name())
*files = append(*files, path)
return nil
})
return err
}
var rootCmd = &cobra.Command{}
func main() {
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(initCmd)
rootCmd.AddCommand(newCmd)
rootCmd.AddCommand(runCmd)
rootCmd.AddCommand(genCmd)
if err := rootCmd.Execute(); err != nil {
log.Println("cmd run error:", err.Error())
os.Exit(1)
}
}
func init() {
// init cmd
initCmd.Flags().BoolP("mod", "", true, "use go mod ? only for dev user")
// init cmd
initCmd.Flags().StringP("app", "a", "", "app name")
_ = initCmd.MarkFlagRequired("app")
// module cmd
newCmd.Flags().StringP("module", "m", "", "module name")
newCmd.Flags().BoolP("advance", "a", false, "gen advance module which include service")
_ = newCmd.MarkFlagRequired("module")
// gen cmd
genCmd.Flags().StringP("http", "p", "", "http file name")
genCmd.Flags().StringP("rpc", "r", "", "rpc file name")
genCmd.Flags().StringP("cmd", "c", "", "cmd file name")
genCmd.Flags().StringP("task", "t", "", "task file name")
genCmd.Flags().StringP("service", "s", "", "service file name")
genCmd.Flags().StringP("model", "m", "", "model file name")
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
thumbnail/v2/job.go
|
package main
import (
"bytes"
"fmt"
"image"
_ "image/gif"
_ "image/jpeg"
"image/png"
"log"
"os"
"strings"
cosclient "github.com/duglin/cosclient/client"
"github.com/nfnt/resize"
)
func MakeThumbnail(inBuf []byte) ([]byte, error) {
inImage, _, err := image.Decode(bytes.NewReader(inBuf))
if err != nil {
return nil, fmt.Errorf("Error decoding image: %s", err)
}
buf := &bytes.Buffer{}
// resize to a fixed 50x50 thumbnail using Lanczos resampling
// (both dimensions are specified, so the aspect ratio is not preserved)
thumb := resize.Resize(50, 50, inImage, resize.Lanczos3)
err = png.Encode(buf, thumb)
if err != nil {
return nil, fmt.Errorf("Error shrinking image: %s", err)
}
return buf.Bytes(), nil
}
func CalcThumbnails(bucketName string) error {
apiKey := os.Getenv("CLOUD_OBJECT_STORAGE_APIKEY")
svcID := os.Getenv("CLOUD_OBJECT_STORAGE_RESOURCE_INSTANCE_ID")
COSClient, err := cosclient.NewClient(apiKey, svcID)
if err != nil {
return err
}
objs, err := COSClient.ListObjects(bucketName)
if err != nil {
return err
}
names := []string{}
thumbs := map[string]bool{}
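// First pass: split object keys into source images and existing "-thumb" outputs so only images without a thumbnail get processed.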
for _, obj := range objs {
if strings.HasSuffix(obj.Key, "-thumb") {
thumbs[obj.Key] = true
continue
}
names = append(names, obj.Key)
}
for _, name := range names {
if _, ok := thumbs[name+"-thumb"]; !ok {
log.Printf("Processing: %s", name)
image, err := COSClient.DownloadObject(bucketName, name)
if err != nil {
return fmt.Errorf("Error downloading %q: %s", name, err)
}
thumb, err := MakeThumbnail(image)
if err == nil {
err = COSClient.UploadObject(bucketName, name+"-thumb", thumb)
if err != nil {
return fmt.Errorf("Error uploading %q:%s", name+"-thumb",
err)
} else {
log.Printf("Added: %s", name+"-thumb")
}
} else {
return fmt.Errorf("Error processing %q: %s", name, err)
}
}
}
return nil
}
func main() {
bucketName := os.Getenv("BUCKET")
if bucketName == "" {
bucketName = "ce-images"
}
if err := CalcThumbnails(bucketName); err != nil {
fmt.Fprintf(os.Stderr, "Error calculating thumbnails: %s\n", err)
os.Exit(1)
}
}
|
[
"\"CLOUD_OBJECT_STORAGE_APIKEY\"",
"\"CLOUD_OBJECT_STORAGE_RESOURCE_INSTANCE_ID\"",
"\"BUCKET\""
] |
[] |
[
"CLOUD_OBJECT_STORAGE_RESOURCE_INSTANCE_ID",
"CLOUD_OBJECT_STORAGE_APIKEY",
"BUCKET"
] |
[]
|
["CLOUD_OBJECT_STORAGE_RESOURCE_INSTANCE_ID", "CLOUD_OBJECT_STORAGE_APIKEY", "BUCKET"]
|
go
| 3 | 0 | |
CTForces/wsgi.py
|
"""
WSGI config for CTForces project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CTForces.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 |