filename
stringlengths 4
198
| content
stringlengths 25
939k
| environment
list | variablearg
list | constarg
list | variableargjson
stringclasses 1
value | constargjson
stringlengths 2
3.9k
| lang
stringclasses 3
values | constargcount
float64 0
129
⌀ | variableargcount
float64 0
0
⌀ | sentence
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|
internal/bind/resolver_example_test.go | //+build integration
package bind_test
import (
"fmt"
"os"
"path/filepath"
"sort"
"github.com/kyma-project/helm-broker/internal"
"github.com/kyma-project/helm-broker/internal/bind"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
// ExampleNewResolver is an end-to-end example for bind.NewResolver: it
// provisions fixture objects (two Secrets, two ConfigMaps, one Service) in a
// dedicated namespace, resolves the bind YAML from fixBindYAML against them,
// and prints the resulting credentials sorted by key so the Output block is
// deterministic.
//
// NOTE(review): this requires a live cluster reachable via $HOME/.kube/config
// (integration build tag); it is not a unit test.
func ExampleNewResolver() {
	// use the current context in kubeconfig
	config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
	fatalOnErr(err)
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	fatalOnErr(err)
	// create namespace for test; deleted again when the example returns
	nsSpec := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "resolver-example-test"}}
	_, err = clientset.CoreV1().Namespaces().Create(nsSpec)
	defer clientset.CoreV1().Namespaces().Delete(nsSpec.ObjectMeta.Name, &metav1.DeleteOptions{})
	fatalOnErr(err)
	// secret with a single key, referenced via secretKeyRef in the bind YAML
	_, err = clientset.CoreV1().Secrets(nsSpec.Name).Create(&v1.Secret{
		Type: v1.SecretTypeOpaque,
		ObjectMeta: metav1.ObjectMeta{
			Name: "single-secret-test-redis",
		},
		StringData: map[string]string{
			// The serialized form of the secret data is a base64 encoded string, so we need to pass here raw data
			"redis-password": "gopherek",
		},
	})
	fatalOnErr(err)
	// secret whose every key is imported via credentialFrom.secretRef
	_, err = clientset.CoreV1().Secrets(nsSpec.Name).Create(&v1.Secret{
		Type: v1.SecretTypeOpaque,
		ObjectMeta: metav1.ObjectMeta{
			Name: "all-secret-test-redis",
		},
		StringData: map[string]string{
			// The serialized form of the secret data is a base64 encoded string, so we need to pass here raw data
			"secret-key-no-1": "piko",
			"secret-key-no-2": "bello",
		},
	})
	fatalOnErr(err)
	// config map with a single key, referenced via configMapKeyRef
	_, err = clientset.CoreV1().ConfigMaps(nsSpec.Name).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "single-cfg-map-test-redis",
		},
		Data: map[string]string{
			"username": "redisMaster",
		},
	})
	fatalOnErr(err)
	// config map whose every key is imported via credentialFrom.configMapRef
	_, err = clientset.CoreV1().ConfigMaps(nsSpec.Name).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "all-cfg-map-test-redis",
		},
		Data: map[string]string{
			"cfg-key-no-1": "margarita",
			"cfg-key-no-2": "capricciosa",
		},
	})
	fatalOnErr(err)
	// service whose port is extracted through a serviceRef jsonpath expression
	_, err = clientset.CoreV1().Services(nsSpec.Name).Create(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-renderer-test-redis",
		},
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeNodePort,
			Selector: map[string]string{"app": "some-app"},
			Ports: []v1.ServicePort{
				{
					Name: "redis",
					Port: 123,
				},
			},
		},
	})
	fatalOnErr(err)
	resolver := bind.NewResolver(clientset.CoreV1())
	out, err := resolver.Resolve(fixBindYAML(), internal.Namespace(nsSpec.Name))
	fatalOnErr(err)
	// sorted printing keeps the Output block below stable
	printSorted(out.Credentials)
	// Output:
	// key: HOST_PORT, value: 123
	// key: REDIS_PASSWORD, value: gopherek
	// key: REDIS_USERNAME, value: redisMaster
	// key: URL, value: host1-example-renderer-test-redis.ns-name.svc.cluster.local:6379
	// key: cfg-key-no-1, value: override-value
	// key: cfg-key-no-2, value: capricciosa
	// key: secret-key-no-1, value: piko
	// key: secret-key-no-2, value: bello
}
// printSorted writes each key/value pair of m to stdout as
// "key: <k>, value: <v>" lines, ordered lexicographically by key so that the
// output is deterministic regardless of map iteration order.
func printSorted(m map[string]string) {
	names := make([]string, 0, len(m))
	for name := range m {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Printf("key: %s, value: %s\n", name, m[name])
	}
}
// fixBindYAML returns the bind specification fixture used by
// ExampleNewResolver. It exercises every credential source the resolver is
// shown to handle there: literal values, serviceRef with a JSONPath
// expression, secretKeyRef, configMapKeyRef, and bulk credentialFrom entries
// (configMapRef / secretRef). The literal cfg-key-no-1 entry deliberately
// overrides the same key imported via configMapRef (see the example Output).
//
// NOTE(review): the raw literal below appears to have lost its original YAML
// indentation during extraction — confirm it still parses before relying on
// the exact bytes here.
func fixBindYAML() []byte {
	return []byte(`
credential:
- name: cfg-key-no-1
value: override-value
- name: URL
value: host1-example-renderer-test-redis.ns-name.svc.cluster.local:6379
- name: HOST_PORT
valueFrom:
serviceRef:
name: example-renderer-test-redis
jsonpath: '{ .spec.ports[?(@.name=="redis")].port }'
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: single-secret-test-redis
key: redis-password
- name: REDIS_USERNAME
valueFrom:
configMapKeyRef:
name: single-cfg-map-test-redis
key: username
credentialFrom:
- configMapRef:
name: all-cfg-map-test-redis
- secretRef:
name: all-secret-test-redis
`)
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
integration-cli/docker_cli_run_test.go | package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/pkg/integration/checker"
icmd "github.com/docker/docker/pkg/integration/cmd"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
"github.com/go-check/check"
libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
)
// "test123" should be printed by docker run
func (s *DockerSuite) TestRunEchoStdout(c *check.C) {
out, _ := dockerCmd(c, "run", "busybox", "echo", "test123")
if out != "test123\n" {
c.Fatalf("container should've printed 'test123', got '%s'", out)
}
}
// "test" should be printed
func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) {
out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test")
if out != "test\n" {
c.Errorf("container should've printed 'test'")
}
}
// TestRunLeakyFileDescriptors ensures docker run does not leak file
// descriptors into the container. This test relies on Unix-specific
// /proc functionality and cannot run on Windows.
func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) {
	testRequires(c, DaemonIsLinux)
	fds, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd")
	// Normally only 0, 1 and 2 exist, but fd 3 gets created by "ls" itself
	// when it does "opendir" on the "fd" directory.
	if fds != "0 1 2 3\n" {
		c.Errorf("container should've printed '0 1 2 3', not: %s", fds)
	}
}
// TestRunLookupGoogleDNS checks that DNS resolution works inside a container
// by looking up google.com. This will fail when Internet access is
// unavailable, hence the Network requirement.
func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) {
	testRequires(c, Network, NotArm)
	if daemonPlatform == "windows" {
		// nslookup isn't present in Windows busybox. Is built-in. Further,
		// nslookup isn't present in nanoserver. Hence just use PowerShell...
		dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com")
	} else {
		dockerCmd(c, "run", DefaultImage, "nslookup", "google.com")
	}
}
// TestRunExitCodeZero checks that a successful container command ("true")
// results in a successful docker run (dockerCmd fails the test otherwise).
func (s *DockerSuite) TestRunExitCodeZero(c *check.C) {
	dockerCmd(c, "run", "busybox", "true")
}
// TestRunExitCodeOne checks that a failing container command ("false") is
// surfaced both as a non-nil error and as exit code 1.
func (s *DockerSuite) TestRunExitCodeOne(c *check.C) {
	_, exitCode, err := dockerCmdWithError("run", "busybox", "false")
	c.Assert(err, checker.NotNil)
	c.Assert(exitCode, checker.Equals, 1)
}
// TestRunStdinPipe verifies that data piped to stdin of a stdin-attached
// container ("run -i -a stdin busybox cat") reaches the containerized process
// and shows up in the container's logs afterwards.
func (s *DockerSuite) TestRunStdinPipe(c *check.C) {
	// TODO Windows: This needs some work to make compatible.
	testRequires(c, DaemonIsLinux)
	runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
	runCmd.Stdin = strings.NewReader("blahblah")
	out, _, _, err := runCommandWithStdoutStderr(runCmd)
	if err != nil {
		c.Fatalf("failed to run container: %v, output: %q", err, out)
	}
	// With only stdin attached, the run command's output is the container ID.
	out = strings.TrimSpace(out)
	dockerCmd(c, "wait", out)
	logsOut, _ := dockerCmd(c, "logs", out)
	containerLogs := strings.TrimSpace(logsOut)
	if containerLogs != "blahblah" {
		c.Errorf("logs didn't print the container's logs %s", containerLogs)
	}
	dockerCmd(c, "rm", out)
}
// TestRunDetachedContainerIDPrinting checks that starting a container in
// detached mode prints its ID, and that `docker rm` echoes that same ID back.
func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) {
	id, _ := dockerCmd(c, "run", "-d", "busybox", "true")
	id = strings.TrimSpace(id)
	dockerCmd(c, "wait", id)
	removed, _ := dockerCmd(c, "rm", id)
	removed = strings.TrimSpace(removed)
	if removed != id {
		c.Errorf("rm didn't print the container ID %s %s", id, removed)
	}
}
// TestRunWorkingDirectory checks that both the short (-w) and long
// (--workdir) flag spellings set the container's working directory.
func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) {
	dir := "/root"
	image := "busybox"
	if daemonPlatform == "windows" {
		dir = `C:/Windows`
	}
	// Exercise the short form first, then the long form.
	for _, flag := range []string{"-w", "--workdir"} {
		out, _ := dockerCmd(c, "run", flag, dir, image, "pwd")
		if strings.TrimSpace(out) != dir {
			c.Errorf("%s failed to set working directory", flag)
		}
	}
}
// TestRunWithoutNetworking checks that --net=none disables networking:
// pinging 8.8.8.8 from inside the container must fail with exit code 1.
func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) {
	count := "-c"
	image := "busybox"
	if daemonPlatform == "windows" {
		count = "-n"
		image = WindowsBaseImage
	}
	// First using the long form --net
	// NOTE(review): only the long form is exercised in this block; the
	// comment implies a short-form variant that is not present here.
	out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8")
	if err != nil && exitCode != 1 {
		c.Fatal(out, err)
	}
	if exitCode != 1 {
		c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
	}
}
// TestRunLinksContainerWithContainerName tests --link using a container name
// as the link target: the linked container's /etc/hosts must map the alias
// ("test") to the parent's bridge IP.
func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as the networking
	// settings are not populated back yet on inspect.
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox")
	ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress")
	out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts")
	if !strings.Contains(out, ip+" test") {
		c.Fatalf("use a container name to link target failed")
	}
}
// TestRunLinksContainerWithContainerID tests --link using a container ID as
// the link target: the linked container's /etc/hosts must map the alias
// ("test") to the parent's bridge IP.
func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as the networking
	// settings are not populated back yet on inspect.
	testRequires(c, DaemonIsLinux)
	cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox")
	cID = strings.TrimSpace(cID)
	ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress")
	out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts")
	if !strings.Contains(out, ip+" test") {
		c.Fatalf("use a container id to link target failed")
	}
}
// TestUserDefinedNetworkLinks checks --link semantics on a user-defined
// bridge network: a link to a not-yet-existing container is accepted at run
// time, fails to resolve until that container starts, and resolves afterwards.
func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	// run a container in user-defined network udlinkNet with a link for an existing container
	// and a link for a container that doesn't exist
	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
		"--link=third:bar", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)
	// ping to first and its alias foo must succeed
	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
	c.Assert(err, check.IsNil)
	// ping to third and its alias must fail
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
	c.Assert(err, check.NotNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
	c.Assert(err, check.NotNil)
	// start third container now
	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top")
	c.Assert(waitRun("third"), check.IsNil)
	// ping to third and its alias must succeed now
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
	c.Assert(err, check.IsNil)
}
// TestUserDefinedNetworkLinksWithRestart checks that links on a user-defined
// bridge network keep resolving after either endpoint container is restarted.
func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
		"busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)
	// ping to first and its alias foo must succeed
	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
	c.Assert(err, check.IsNil)
	// Restart first container
	dockerCmd(c, "restart", "first")
	c.Assert(waitRun("first"), check.IsNil)
	// ping to first and its alias foo must still succeed
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
	c.Assert(err, check.IsNil)
	// Restart second container
	dockerCmd(c, "restart", "second")
	c.Assert(waitRun("second"), check.IsNil)
	// ping to first and its alias foo must still succeed
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
	c.Assert(err, check.IsNil)
}
// TestRunWithNetAliasOnDefaultNetworks verifies that --net-alias is rejected
// on the predefined networks (bridge, host, none) with the dedicated
// runconfig error.
func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	defaults := []string{"bridge", "host", "none"}
	// Named "nw" rather than "net" so the loop variable does not shadow the
	// imported stdlib net package.
	for _, nw := range defaults {
		out, _, err := dockerCmdWithError("run", "-d", "--net", nw, "--net-alias", "alias_"+nw, "busybox", "top")
		c.Assert(err, checker.NotNil)
		c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error())
	}
}
// TestUserDefinedNetworkAlias checks --net-alias on a user-defined network:
// each container automatically gets its short-id as an alias, explicit
// aliases (foo1, foo2) resolve from peers, and all aliases keep resolving
// after the container is restarted.
func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	dockerCmd(c, "network", "create", "-d", "bridge", "net1")
	cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top")
	c.Assert(waitRun("first"), check.IsNil)
	// Check if default short-id alias is added automatically
	id := strings.TrimSpace(cid1)
	aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases")
	c.Assert(aliases, checker.Contains, stringid.TruncateID(id))
	cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top")
	c.Assert(waitRun("second"), check.IsNil)
	// Check if default short-id alias is added automatically
	id = strings.TrimSpace(cid2)
	aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases")
	c.Assert(aliases, checker.Contains, stringid.TruncateID(id))
	// ping to first and its network-scoped aliases
	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
	c.Assert(err, check.IsNil)
	// ping first container's short-id alias
	// NOTE(review): cid1 is untrimmed here (trailing newline); presumably
	// TruncateID only keeps a short prefix so this is harmless — verify.
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1))
	c.Assert(err, check.IsNil)
	// Restart first container
	dockerCmd(c, "restart", "first")
	c.Assert(waitRun("first"), check.IsNil)
	// ping to first and its network-scoped aliases must succeed
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
	c.Assert(err, check.IsNil)
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
	c.Assert(err, check.IsNil)
	// ping first container's short-id alias
	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1))
	c.Assert(err, check.IsNil)
}
// TestRunWithDaemonFlags checks that a daemon-only flag (--exec-opt) placed
// before a client subcommand is rejected as unknown. Issue 9677.
func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) {
	out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true")
	c.Assert(err, checker.NotNil)
	c.Assert(out, checker.Contains, "unknown flag: --exec-opt")
}
// TestRunWithVolumesFromExited checks that --volumes-from can reference an
// exited container: a file written to the first container's volume must be
// readable from a second container. Regression test for #4979.
func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
	var (
		out      string
		exitCode int
	)
	// Create a file in a volume
	if daemonPlatform == "windows" {
		out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`)
	} else {
		out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
	}
	if exitCode != 0 {
		c.Fatal("1", out, exitCode)
	}
	// Read the file from another container using --volumes-from to access the volume in the second container
	if daemonPlatform == "windows" {
		out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`)
	} else {
		out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file")
	}
	if exitCode != 0 {
		c.Fatal("2", out, exitCode)
	}
}
// TestRunCreateVolumesInSymlinkDir covers the case where the volume path is a
// symlink that also exists on the host, and the host side is a file not a
// dir. The volume call is a normal volume, not a bind mount.
func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
	var (
		dockerFile    string
		containerPath string
		cmd           string
	)
	// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
	// Windows does not support symlinks inside a volume path
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	name := "test-volume-symlink"
	dir, err := ioutil.TempDir("", name)
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(dir)
	// In the case of Windows to Windows CI, if the machine is setup so that
	// the temp directory is not the C: drive, this test is invalid and will
	// not work.
	if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" {
		c.Skip("Requires TEMP to point to C: drive")
	}
	// Create the conflicting host-side file at <dir>/test.
	f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700)
	if err != nil {
		c.Fatal(err)
	}
	f.Close()
	if daemonPlatform == "windows" {
		dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir)
		containerPath = `c:\test\test`
		cmd = "tasklist"
	} else {
		dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir)
		containerPath = "/test/test"
		cmd = "true"
	}
	if _, err := buildImage(name, dockerFile, false); err != nil {
		c.Fatal(err)
	}
	// Must not crash: the volume target resolves through the symlink.
	dockerCmd(c, "run", "-v", containerPath, name, cmd)
}
// TestRunCreateVolumesInSymlinkDir2 covers the case where the volume path is
// a symlink inside the container only (no matching host path).
func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) {
	var (
		dockerFile    string
		containerPath string
		cmd           string
	)
	// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
	// Windows does not support symlinks inside a volume path
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	name := "test-volume-symlink2"
	if daemonPlatform == "windows" {
		dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name)
		containerPath = `c:\test\test`
		cmd = "tasklist"
	} else {
		dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name)
		containerPath = "/test/test"
		cmd = "true"
	}
	if _, err := buildImage(name, dockerFile, false); err != nil {
		c.Fatal(err)
	}
	// Must not crash: the volume target resolves through the symlink.
	dockerCmd(c, "run", "-v", containerPath, name, cmd)
}
// TestRunVolumesMountedAsReadonly checks that writing into a volume mounted
// with the :ro modifier fails.
func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
	// TODO Windows: Temporary check - remove once TP5 support is dropped
	if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
		c.Skip("Needs later Windows build for RO volumes")
	}
	if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 {
		c.Fatalf("run should fail because volume is ro: exit code %d", code)
	}
}
// TestRunVolumesFromInReadonlyModeFails checks that a volume inherited via
// --volumes-from parent:ro cannot be written to.
func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) {
	// TODO Windows: Temporary check - remove once TP5 support is dropped
	if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
		c.Skip("Needs later Windows build for RO volumes")
	}
	var (
		volumeDir string
		fileInVol string
	)
	if daemonPlatform == "windows" {
		volumeDir = `c:/test` // Forward-slash as using busybox
		fileInVol = `c:/test/file`
	} else {
		testRequires(c, DaemonIsLinux)
		volumeDir = "/test"
		fileInVol = `/test/file`
	}
	dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true")
	if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 {
		c.Fatalf("run should fail because volume is ro: exit code %d", code)
	}
}
// TestRunVolumesFromInReadWriteMode checks --volumes-from modifiers: ":rw"
// and no modifier allow writes, while an unknown modifier (":bar") is
// rejected with "invalid mode". Regression test for #1201.
func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) {
	var (
		volumeDir string
		fileInVol string
	)
	if daemonPlatform == "windows" {
		volumeDir = `c:/test` // Forward-slash as using busybox
		fileInVol = `c:/test/file`
	} else {
		volumeDir = "/test"
		fileInVol = "/test/file"
	}
	dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true")
	dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol)
	if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) {
		c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out)
	}
	dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol)
}
// TestVolumesFromGetsProperMode checks that a read-only volume stays
// read-only when inherited through --volumes-from, even if the inheriting
// container asks for ":rw".
func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) {
	testRequires(c, SameHostDaemon)
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
	hostpath := randomTmpDirPath("test", daemonPlatform)
	if err := os.MkdirAll(hostpath, 0755); err != nil {
		c.Fatalf("Failed to create %s: %q", hostpath, err)
	}
	defer os.RemoveAll(hostpath)
	// TODO Windows: Temporary check - remove once TP5 support is dropped
	if daemonPlatform == "windows" && windowsDaemonKV < 14350 {
		c.Skip("Needs later Windows build for RO volumes")
	}
	dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true")
	// Expect this "rw" mode to be ignored since the inherited volume is "ro"
	if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil {
		c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
	}
	dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true")
	// Expect this to be read-only since both are "ro"
	if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil {
		c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
	}
}
// TestRunNoDupVolumes checks that two -v mounts targeting the same container
// path are rejected with a "Duplicate mount point" error, both for host-path
// bind mounts (GH#10618) and for named volumes (GH#22093), and cleans up the
// volume left behind by the failed create.
func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
	path1 := randomTmpDirPath("test1", daemonPlatform)
	path2 := randomTmpDirPath("test2", daemonPlatform)
	someplace := ":/someplace"
	if daemonPlatform == "windows" {
		// Windows requires that the source directory exists before calling HCS
		testRequires(c, SameHostDaemon)
		someplace = `:c:\someplace`
		if err := os.MkdirAll(path1, 0755); err != nil {
			c.Fatalf("Failed to create %s: %q", path1, err)
		}
		defer os.RemoveAll(path1)
		if err := os.MkdirAll(path2, 0755); err != nil {
			// Fixed: this message previously reported path1 (copy-paste bug).
			c.Fatalf("Failed to create %s: %q", path2, err)
		}
		defer os.RemoveAll(path2)
	}
	mountstr1 := path1 + someplace
	mountstr2 := path2 + someplace
	if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil {
		c.Fatal("Expected error about duplicate mount definitions")
	} else {
		if !strings.Contains(out, "Duplicate mount point") {
			c.Fatalf("Expected 'duplicate mount point' error, got %v", out)
		}
	}
	// Test for https://github.com/docker/docker/issues/22093
	volumename1 := "test1"
	volumename2 := "test2"
	volume1 := volumename1 + someplace
	volume2 := volumename2 + someplace
	if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil {
		c.Fatal("Expected error about duplicate mount definitions")
	} else {
		if !strings.Contains(out, "Duplicate mount point") {
			c.Fatalf("Expected 'duplicate mount point' error, got %v", out)
		}
	}
	// the failed create should have created volume volumename1 or volumename2;
	// we should remove volumename1 or volumename2 successfully
	out, _ := dockerCmd(c, "volume", "ls")
	if strings.Contains(out, volumename1) {
		dockerCmd(c, "volume", "rm", volumename1)
	} else {
		dockerCmd(c, "volume", "rm", volumename2)
	}
}
// TestRunApplyVolumesFromBeforeVolumes checks that --volumes-from is applied
// before -v, so the parent's volume content wins for the shared path.
// Test for #1351.
func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) {
	prefix := ""
	if daemonPlatform == "windows" {
		prefix = `c:`
	}
	dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo")
	dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo")
}
// TestRunMultipleVolumesFrom checks that several --volumes-from flags can be
// combined, making both parents' volumes visible in one container.
func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) {
	prefix := ""
	if daemonPlatform == "windows" {
		prefix = `c:`
	}
	dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo")
	dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar")
	dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar")
}
// TestRunVerifyContainerID checks that a detached run prints a full 64-char
// lowercase-hex container ID and exits with status 0.
func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) {
	out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true")
	if err != nil {
		c.Fatal(err)
	}
	if exit != 0 {
		c.Fatalf("expected exit code 0 received %d", exit)
	}
	id := strings.TrimSuffix(out, "\n")
	ok, err := regexp.MatchString("^[0-9a-f]{64}$", id)
	if err != nil {
		c.Fatal(err)
	}
	if !ok {
		c.Fatalf("Invalid container ID: %s", out)
	}
}
// TestRunCreateVolume checks that creating a container with an anonymous
// volume doesn't crash. Regression test for #995.
func (s *DockerSuite) TestRunCreateVolume(c *check.C) {
	prefix := ""
	if daemonPlatform == "windows" {
		prefix = `c:`
	}
	dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true")
}
// TestRunCreateVolumeWithSymlink checks that a volume whose path goes through
// a symlink works, and that `rm -v` removes the underlying volume directory.
// Test for #5152. Note that this bug happens only with symlinks with a target
// that starts with '/'.
func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) {
	// Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...)
	testRequires(c, DaemonIsLinux)
	image := "docker-test-createvolumewithsymlink"
	buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-")
	buildCmd.Stdin = strings.NewReader(`FROM busybox
		RUN ln -s home /bar`)
	buildCmd.Dir = workingDirectory
	err := buildCmd.Run()
	if err != nil {
		c.Fatalf("could not build '%s': %v", image, err)
	}
	// /bar is a symlink to /home, so the volume must show up mounted at /home/foo.
	_, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo")
	if err != nil || exitCode != 0 {
		c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
	}
	volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo")
	c.Assert(err, checker.IsNil)
	_, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink")
	if err != nil || exitCode != 0 {
		c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode)
	}
	// The volume directory must be gone after rm -v.
	_, err = os.Stat(volPath)
	if !os.IsNotExist(err) {
		c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath)
	}
}
// TestRunVolumesFromSymlinkPath tests that a volume path that has a symlink
// exists in a container mounting it with `--volumes-from`.
func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) {
	// TODO Windows (Post TP5): This test cannot run on a Windows daemon as
	// Windows does not support symlinks inside a volume path
	testRequires(c, DaemonIsLinux)
	name := "docker-test-volumesfromsymlinkpath"
	prefix := ""
	dfContents := `FROM busybox
		RUN ln -s home /foo
		VOLUME ["/foo/bar"]`
	if daemonPlatform == "windows" {
		prefix = `c:`
		dfContents = `FROM ` + WindowsBaseImage + `
	    RUN mkdir c:\home
		RUN mklink /D c:\foo c:\home
		VOLUME ["c:/foo/bar"]
		ENTRYPOINT c:\windows\system32\cmd.exe`
	}
	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
	buildCmd.Stdin = strings.NewReader(dfContents)
	buildCmd.Dir = workingDirectory
	err := buildCmd.Run()
	if err != nil {
		c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
	}
	out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name)
	if err != nil || exitCode != 0 {
		c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out)
	}
	// The inherited volume must be visible at the symlinked path.
	_, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar")
	if err != nil || exitCode != 0 {
		c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
	}
}
// TestRunExitCode checks that an arbitrary container exit status (72) is
// propagated as the docker run exit code and reported as an error.
func (s *DockerSuite) TestRunExitCode(c *check.C) {
	_, exit, err := dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72")
	if err == nil {
		c.Fatal("should not have a non nil error")
	}
	if exit != 72 {
		c.Fatalf("expected exit code 72 received %d", exit)
	}
}
// TestRunUserDefaults checks the default user inside a container: root on
// Linux, ContainerAdministrator on Windows.
func (s *DockerSuite) TestRunUserDefaults(c *check.C) {
	expected := "uid=0(root) gid=0(root)"
	if daemonPlatform == "windows" {
		expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)"
	}
	out, _ := dockerCmd(c, "run", "busybox", "id")
	if !strings.Contains(out, expected) {
		c.Fatalf("expected '%s' got %s", expected, out)
	}
}
// TestRunUserByName checks that -u accepts a user name ("root") and the
// container runs with that identity.
func (s *DockerSuite) TestRunUserByName(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	if out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id"); !strings.Contains(out, "uid=0(root) gid=0(root)") {
		c.Fatalf("expected root user got %s", out)
	}
}
// TestRunUserByID checks that -u accepts a numeric uid (1) which resolves to
// the daemon user in busybox.
func (s *DockerSuite) TestRunUserByID(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id")
	if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
		c.Fatalf("expected daemon user got %s", out)
	}
}
// TestRunUserByIDBig checks that a uid beyond the 32-bit signed range
// (2147483648) is rejected with libcontainer's uid-range error.
func (s *DockerSuite) TestRunUserByIDBig(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux, NotArm)
	out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id")
	if err == nil {
		c.Fatal("No error, but must be.", out)
	}
	// Case-insensitive comparison because daemons differ in error casing.
	if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) {
		c.Fatalf("expected error about uids range, got %s", out)
	}
}
// TestRunUserByIDNegative checks that a negative uid (-1) is rejected with
// libcontainer's uid-range error.
func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id")
	if err == nil {
		c.Fatal("No error, but must be.", out)
	}
	// Case-insensitive comparison because daemons differ in error casing.
	if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) {
		c.Fatalf("expected error about uids range, got %s", out)
	}
}
// TestRunUserByIDZero checks that -u 0 resolves to root (uid=0, gid=0, with
// supplementary group wheel).
func (s *DockerSuite) TestRunUserByIDZero(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id")
	if err != nil {
		c.Fatal(err, out)
	}
	if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") {
		// Fixed: the message previously said "daemon user" (copy-pasted from
		// the uid=1 test); -u 0 expects root.
		c.Fatalf("expected root user got %s", out)
	}
}
// TestRunUserNotFound checks that an unknown user name passed to -u causes
// the container start to fail.
func (s *DockerSuite) TestRunUserNotFound(c *check.C) {
	// TODO Windows: This test cannot run on a Windows daemon as Windows does
	// not support the use of -u
	testRequires(c, DaemonIsLinux)
	_, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id")
	if err == nil {
		c.Fatal("unknown user should cause container to fail")
	}
}
// TestRunTwoConcurrentContainers runs two containers in parallel and
// checks that both complete without error.
func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) {
	const sleepTime = "2"
	var wg sync.WaitGroup
	results := make(chan error, 2)
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime)
			results <- err
		}()
	}
	wg.Wait()
	close(results)
	for err := range results {
		c.Assert(err, check.IsNil)
	}
}
// TestRunEnvironment checks -e handling: explicit KEY=VAL pairs, KEY-only
// flags that forward the client's value, values with embedded newlines,
// and emptying an image-provided variable (HOME=).
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
func (s *DockerSuite) TestRunEnvironment(c *check.C) {
	testRequires(c, DaemonIsLinux)
	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
	// TRUE and TRICKY exist only in the client environment; the KEY-only
	// -e flags above should forward these values into the container.
	cmd.Env = append(os.Environ(),
		"TRUE=false",
		"TRICKY=tri\ncky\n",
	)
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}
	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)
	// TRICKY's embedded newlines make `env` output split into the extra
	// entries "TRICKY=tri", "cky" and "" below; both lists are sorted so
	// the element-by-element comparison is order-independent.
	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOSTNAME=testing",
		"FALSE=true",
		"TRUE=false",
		"TRICKY=tri",
		"cky",
		"",
		"HOME=/root",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}
// TestRunEnvironmentErase checks that -e KEY for variables NOT set in the
// client environment removes them from the container, leaving only the
// image defaults (PATH and HOME).
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) {
	testRequires(c, DaemonIsLinux)
	// Test to make sure that when we use -e on env vars that are
	// not set in our local env that they're removed (if present) in
	// the container
	cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env")
	// appendBaseEnv(true) builds a minimal base environment; FOO and
	// HOSTNAME are deliberately absent from it.
	cmd.Env = appendBaseEnv(true)
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}
	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)
	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOME=/root",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}
// TestRunEnvironmentOverride checks that -e overrides variables already
// present in the environment: HOSTNAME is forwarded from the client env
// and HOME is replaced with an explicit value.
// TODO Windows: Environment handling is different between Linux and
// Windows and this test relies currently on unix functionality.
func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) {
	testRequires(c, DaemonIsLinux)
	// Test to make sure that when we use -e on env vars that are
	// already in the env that we're overriding them
	cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env")
	cmd.Env = appendBaseEnv(true, "HOSTNAME=bar")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		c.Fatal(err, out)
	}
	actualEnv := strings.Split(strings.TrimSpace(out), "\n")
	sort.Strings(actualEnv)
	goodEnv := []string{
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOME=/root2",
		"HOSTNAME=bar",
	}
	sort.Strings(goodEnv)
	if len(goodEnv) != len(actualEnv) {
		c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", "))
	}
	for i := range goodEnv {
		if actualEnv[i] != goodEnv[i] {
			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
		}
	}
}
// TestRunContainerNetwork checks that a container can ping its own
// loopback address.
func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
	if daemonPlatform != "windows" {
		dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1")
		return
	}
	// Windows busybox does not have ping. Use built in ping instead.
	dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
}
// TestRunNetHostNotAllowedWithLinks checks that combining --net=host with
// --link is rejected.
// TODO Windows: This is Linux specific as --link is not supported and
// this will be deprecated in favor of container networking model.
func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	dockerCmd(c, "run", "--name", "linked", "busybox", "true")
	if _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true"); err == nil {
		c.Fatal("Expected error")
	}
}
// #7851 hostname outside container shows FQDN, inside only shortname
// For testing purposes it is not required to set host's hostname directly
// and use "--net=host" (as the original issue submitter did), as the same
// codepath is executed with "docker run -h <hostname>". Both were manually
// tested, but this testcase takes the simpler path of using "run -h .."
func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) {
	// TODO Windows: -h is not yet functional.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname")
	hostname := strings.Trim(out, "\r\n")
	if hostname != "foo.bar.baz" {
		c.Fatalf("expected hostname 'foo.bar.baz', received %s", hostname)
	}
}
// TestRunPrivilegedCanMknod checks that mknod succeeds inside a
// --privileged container.
// Not applicable for Windows as Windows daemon does not support
// the concept of --privileged, and mknod is a Unix concept.
func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunUnprivilegedCanMknod checks that mknod succeeds even without
// --privileged (CAP_MKNOD is in the default capability set).
// Not applicable for Windows as Windows daemon does not support
// the concept of --privileged, and mknod is a Unix concept.
func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunCapDropInvalid checks that an unknown capability name passed to
// --cap-drop is rejected.
// Not applicable for Windows as there is no concept of --cap-drop
func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls")
	if err == nil {
		// Fix: this branch is reached precisely because err is nil, so the
		// old c.Fatal(err, out) printed "<nil>" instead of an explanation.
		c.Fatalf("expected run with invalid --cap-drop to fail, got: %s", out)
	}
}
// TestRunCapDropCannotMknod checks that dropping CAP_MKNOD makes mknod
// fail inside the container.
// Not applicable for Windows as there is no concept of --cap-drop or mknod
func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(err, out)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected mknod without CAP_MKNOD to fail, got: %s", out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}
// TestRunCapDropCannotMknodLowerCase checks that --cap-drop accepts a
// lowercase capability name and still blocks mknod.
// Not applicable for Windows as there is no concept of --cap-drop or mknod
func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(err, out)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected mknod without CAP_MKNOD to fail, got: %s", out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}
// TestRunCapDropALLCannotMknod checks that --cap-drop=ALL blocks mknod
// even when an unrelated capability (SETGID) is re-added.
// Not applicable for Windows as there is no concept of --cap-drop or mknod
func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(err, out)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected mknod without CAP_MKNOD to fail, got: %s", out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}
// TestRunCapDropALLAddMknodCanMknod checks that explicitly re-adding
// CAP_MKNOD after --cap-drop=ALL allows mknod again.
// Not applicable for Windows as there is no concept of --cap-drop or mknod
func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunCapAddInvalid checks that an unknown capability name passed to
// --cap-add is rejected.
// Not applicable for Windows as there is no concept of --cap-add
func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls")
	if err == nil {
		// Fix: this branch is reached precisely because err is nil, so the
		// old c.Fatal(err, out) printed "<nil>" instead of an explanation.
		c.Fatalf("expected run with invalid --cap-add to fail, got: %s", out)
	}
}
// TestRunCapAddCanDownInterface checks that adding CAP_NET_ADMIN allows
// the container to bring its network interface down.
// Not applicable for Windows as there is no concept of --cap-add
func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunCapAddALLCanDownInterface checks that --cap-add=ALL includes
// CAP_NET_ADMIN, allowing the interface to be brought down.
// Not applicable for Windows as there is no concept of --cap-add
func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunCapAddALLDropNetAdminCanDownInterface checks that a --cap-drop
// wins over --cap-add=ALL: without NET_ADMIN the interface cannot be
// brought down.
// Not applicable for Windows as there is no concept of --cap-add
func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(err, out)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected downing the interface without NET_ADMIN to fail, got: %s", out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}
// TestRunGroupAdd checks that --group-add appends supplementary groups,
// both by name (audio, staff) and by numeric gid (777).
// Not applicable for Windows as there is no concept of --group-add
func (s *DockerSuite) TestRunGroupAdd(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id")
	groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777"
	got := strings.Trim(out, "\r\n")
	if got != groupsList {
		c.Fatalf("expected output %s received %s", groupsList, got)
	}
}
// TestRunPrivilegedCanMount checks that mount succeeds inside a
// --privileged container.
// Not applicable for Windows as there is no concept of --privileged
func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
	result := strings.Trim(out, "\r\n")
	if result != "ok" {
		c.Fatalf("expected output ok received %s", result)
	}
}
// TestRunUnprivilegedCannotMount checks that mount fails inside an
// unprivileged container.
// Not applicable for Windows as there is no concept of unprivileged
func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(err, out)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected mount in unprivileged container to fail, got: %s", out)
	}
	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
		c.Fatalf("expected output not ok received %s", actual)
	}
}
// TestRunSysNotWritableInNonPrivilegedContainers checks that /sys is
// read-only in an unprivileged container.
// Not applicable for Windows as there is no concept of unprivileged
func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) {
	testRequires(c, DaemonIsLinux, NotArm)
	_, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling")
	if err == nil || code == 0 {
		c.Fatal("sys should not be writable in a non privileged container")
	}
}
// TestRunSysWritableInPrivilegedContainers checks that /sys is writable
// in a --privileged container.
// Not applicable for Windows as there is no concept of unprivileged
func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
	_, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling")
	if err != nil || code != 0 {
		c.Fatalf("sys should be writable in privileged container")
	}
}
// TestRunProcNotWritableInNonPrivilegedContainers checks that /proc is
// read-only in an unprivileged container.
// Not applicable for Windows as there is no concept of unprivileged
func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) {
	testRequires(c, DaemonIsLinux)
	_, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger")
	if err == nil || code == 0 {
		c.Fatal("proc should not be writable in a non privileged container")
	}
}
// TestRunProcWritableInPrivilegedContainers checks that /proc is writable
// in a --privileged container.
// Not applicable for Windows as there is no concept of --privileged
func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	_, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger")
	if code != 0 {
		c.Fatalf("proc should be writable in privileged container")
	}
}
// TestRunDeviceNumbers checks that /dev/null inside the container is the
// expected character device: crw-rw-rw-, owned by root:root, major 1
// minor 3.
// Not applicable on Windows as /dev/ is a Unix specific concept
// TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user
func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null")
	deviceLineFields := strings.Fields(out)
	// Fields 6-8 of the `ls -l` line hold the timestamp (month, day,
	// time), which varies between runs; blank them so the fixed-value
	// comparison below is stable.
	deviceLineFields[6] = ""
	deviceLineFields[7] = ""
	deviceLineFields[8] = ""
	expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"}
	if !(reflect.DeepEqual(deviceLineFields, expected)) {
		c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
	}
}
// TestRunThatCharacterDevicesActLikeCharacterDevices checks that reading
// from /dev/zero produces actual data: a 5 KiB dd copy must yield a file
// whose `du -h` size does not start with '0'.
// Not applicable on Windows as /dev/ is a Unix specific concept
func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero")
	size := strings.Trim(out, "\r\n")
	if size[0] == '0' {
		c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", size)
	}
}
// TestRunUnprivilegedWithChroot checks that an unprivileged container can
// still invoke chroot (a no-op chroot to / running `true`).
// Not applicable on Windows as it does not support chroot
func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) {
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "busybox", "chroot", "/", "true")
}
// TestRunAddingOptionalDevices checks that --device src:dst exposes the
// host device under the requested container path.
// Not applicable on Windows as Windows does not support --device
func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo")
	devPath := strings.Trim(out, "\r\n")
	if devPath != "/dev/nulo" {
		c.Fatalf("expected output /dev/nulo, received %s", devPath)
	}
}
// TestRunAddingOptionalDevicesNoSrc checks the --device form without a
// destination path ("/dev/zero:rw" — permissions only), which should map
// the device at its original path.
// Not applicable on Windows as Windows does not support --device
func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero")
	devPath := strings.Trim(out, "\r\n")
	if devPath != "/dev/zero" {
		c.Fatalf("expected output /dev/zero, received %s", devPath)
	}
}
// TestRunAddingOptionalDevicesInvalidMode checks that a --device spec
// with only the "ro" mode is rejected.
// Not applicable on Windows as Windows does not support --device
func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	if _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero"); err == nil {
		c.Fatalf("run container with device mode ro should fail")
	}
}
// TestRunModeHostname checks /etc/hostname content: set explicitly via
// -h, and inherited from the host when running with --net=host.
// Not applicable on Windows as Windows does not support -h
func (s *DockerSuite) TestRunModeHostname(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname")
	if got := strings.Trim(out, "\r\n"); got != "testhostname" {
		c.Fatalf("expected 'testhostname', but says: %q", got)
	}
	hostname, err := os.Hostname()
	if err != nil {
		c.Fatal(err)
	}
	out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname")
	if got := strings.Trim(out, "\r\n"); got != hostname {
		c.Fatalf("expected %q, but says: %q", hostname, got)
	}
}
// TestRunRootWorkdir checks that --workdir / sets the working directory
// to the filesystem root ("/" on Linux, "C:/" prefixed on Windows).
func (s *DockerSuite) TestRunRootWorkdir(c *check.C) {
	out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd")
	expected := "/\n"
	if daemonPlatform == "windows" {
		expected = "C:" + expected
	}
	if out != expected {
		// Fix: the failure message previously printed the suite receiver
		// `s` (via %q) instead of the pwd output.
		c.Fatalf("pwd returned %q (expected %s)", out, expected)
	}
}
// TestRunAllowBindMountingRoot checks that the host root filesystem may
// be bind-mounted to a non-root path inside the container.
func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) {
	if daemonPlatform != "windows" {
		dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host")
		return
	}
	// Windows busybox will fail with Permission Denied on items such as pagefile.sys
	dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`)
}
// TestRunDisallowBindMountingRootToRoot checks that bind-mounting the
// host root over the container root is rejected.
func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) {
	mount := "/:/"
	targetDir := "/host"
	if daemonPlatform == "windows" {
		mount = `c:\:c\`
		targetDir = "c:/host" // Forward slash as using busybox
	}
	out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir)
	if err == nil {
		// Fix: err is guaranteed nil here, so the old c.Fatal(out, err)
		// printed "<nil>" instead of an explanation.
		c.Fatalf("expected bind mounting root over root to fail, got: %s", out)
	}
}
// Verify that a container gets default DNS when only localhost resolvers exist
func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) {
	// Not applicable on Windows as this is testing Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	// preserve original resolv.conf for restoring after test
	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	// NOTE(review): only a missing file is treated as fatal; other read
	// errors would leave origResolvConf empty — verify this is intended.
	if os.IsNotExist(err) {
		c.Fatalf("/etc/resolv.conf does not exist")
	}
	// defer restored original conf
	defer func() {
		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
			c.Fatal(err)
		}
	}()
	// test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost
	// 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by
	// GetNameservers(), leading to a replacement of nameservers with the default set
	tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1")
	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
	// check that the actual defaults are appended to the commented out
	// localhost resolver (which should be preserved)
	// NOTE: if we ever change the defaults from google dns, this will break
	expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
	if actual != expected {
		c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual)
	}
}
// TestRunDNSOptions checks that --dns, --dns-search and --dns-opt are
// reflected in the container's /etc/resolv.conf, and that a localhost
// DNS address produces a client-side warning. A search domain of "."
// suppresses the search line entirely.
func (s *DockerSuite) TestRunDNSOptions(c *check.C) {
	// Not applicable on Windows as Windows does not support --dns*, or
	// the Unix-specific functionality of resolv.conf.
	testRequires(c, DaemonIsLinux)
	out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf")
	// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
	if !strings.Contains(stderr, "Localhost DNS setting") {
		c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr)
	}
	// Collapse the resolv.conf lines onto one line for a single comparison.
	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
	if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" {
		c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual)
	}
	// "--dns-search=." should result in no search line at all.
	out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf")
	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
	if actual != "nameserver 127.0.0.1 options ndots:3" {
		c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual)
	}
}
// TestRunDNSRepeatOptions checks that repeated --dns, --dns-search and
// --dns-opt flags all accumulate into the container's resolv.conf.
func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf")
	got := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
	if got != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" {
		c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", got)
	}
}
// TestRunDNSOptionsBasedOnHostResolvConf verifies how the container's
// resolv.conf is derived from the host's: --dns replaces the nameservers
// but keeps the host search domains, --dns-search replaces the search
// domains but keeps the host nameservers, and with no flags the host file
// (minus localhost resolvers) is inherited.
// Not applicable on Windows as testing Unix specific functionality
func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if os.IsNotExist(err) {
		c.Fatalf("/etc/resolv.conf does not exist")
	}
	hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP)
	hostSearch := resolvconf.GetSearchDomains(origResolvConf)
	var out string
	// --dns overrides the nameservers; search domains come from the host.
	out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
	if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" {
		c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
	}
	actualSearch := resolvconf.GetSearchDomains([]byte(out))
	if len(actualSearch) != len(hostSearch) {
		c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
	}
	for i := range actualSearch {
		if actualSearch[i] != hostSearch[i] {
			// Fix: the expected (host) and actual (container) arguments
			// were swapped in the original message.
			c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
		}
	}
	// --dns-search overrides the search domains; nameservers come from the host.
	out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
	actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP)
	if len(actualNameservers) != len(hostNameservers) {
		c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers))
	}
	for i := range actualNameservers {
		if actualNameservers[i] != hostNameservers[i] {
			// Fix: expected/actual arguments were swapped.
			c.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i])
		}
	}
	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
		c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
	}
	// test with file
	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	// put the old resolvconf back
	defer func() {
		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
			c.Fatal(err)
		}
	}()
	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if os.IsNotExist(err) {
		c.Fatalf("/etc/resolv.conf does not exist")
	}
	hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP)
	hostSearch = resolvconf.GetSearchDomains(resolvConf)
	// With no dns flags the container inherits the host file; the
	// localhost nameserver (127.0.0.1) must be filtered out, leaving
	// only 12.34.56.78.
	out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf")
	if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
		c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
	}
	actualSearch = resolvconf.GetSearchDomains([]byte(out))
	if len(actualSearch) != len(hostSearch) {
		c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
	}
	for i := range actualSearch {
		if actualSearch[i] != hostSearch[i] {
			// Fix: expected/actual arguments were swapped.
			c.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
		}
	}
}
// Test to see if a non-root user can resolve a DNS name. Also
// check if the container resolv.conf file has at least 0644 perm.
func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
	// Not applicable on Windows as Windows does not support --user
	testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm)
	// nslookup as the "nobody" user exercises DNS resolution without root.
	dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org")
	cID, err := getIDByName("testperm")
	if err != nil {
		c.Fatal(err)
	}
	// The container's resolv.conf must be world-readable (at least 0644)
	// so non-root processes can resolve names.
	fmode := (os.FileMode)(0644)
	finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
	if err != nil {
		c.Fatal(err)
	}
	// Masking with fmode checks that every bit of 0644 is set, while
	// still permitting extra permission bits.
	if (finfo.Mode() & fmode) != fmode {
		c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String())
	}
}
// Test if container resolv.conf gets updated the next time it restarts
// if host /etc/resolv.conf has changed. This only applies if the container
// uses the host's /etc/resolv.conf and does not have any dns options provided.
func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
	// Not applicable on Windows as testing unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)
	c.Skip("Unstable test, to be re-activated once #19937 is resolved")
	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
	//take a copy of resolv.conf for restoring after test completes
	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	// This test case is meant to test monitoring resolv.conf when it is
	// a regular file not a bind mount. So we unmount resolv.conf and replace
	// it with a file containing the original settings.
	mounted, err := mount.Mounted("/etc/resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if mounted {
		cmd := exec.Command("umount", "/etc/resolv.conf")
		if _, err = runCommand(cmd); err != nil {
			c.Fatal(err)
		}
	}
	//cleanup
	defer func() {
		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
			c.Fatal(err)
		}
	}()
	//1. test that a restarting container gets an updated resolv.conf
	dockerCmd(c, "run", "--name=first", "busybox", "true")
	containerID1, err := getIDByName("first")
	if err != nil {
		c.Fatal(err)
	}
	// replace resolv.conf with our temporary copy
	bytesResolvConf := []byte(tmpResolvConf)
	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	// start the container again to pickup changes
	dockerCmd(c, "start", "first")
	// check for update in container
	containerResolv, err := readContainerFile(containerID1, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
	}
	/* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	} */
	//2. test that a restarting container does not receive resolv.conf updates
	// if it modified the container copy of the starting point resolv.conf
	dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
	containerID2, err := getIDByName("second")
	if err != nil {
		c.Fatal(err)
	}
	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	}
	// start the container again
	dockerCmd(c, "start", "second")
	// check for update in container
	containerResolv, err = readContainerFile(containerID2, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	// the container edited its own copy, so the host update must NOT be applied
	if bytes.Equal(containerResolv, resolvConfSystem) {
		c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv))
	}
	//3. test that a running container's resolv.conf is not modified while running
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	runningContainerID := strings.TrimSpace(out)
	// replace resolv.conf
	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	// check for update in container
	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	// updates are only applied on (re)start, never while the container runs
	if bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
	}
	//4. test that a running container's resolv.conf is updated upon restart
	// (the above container is still running..)
	dockerCmd(c, "restart", runningContainerID)
	// check for update in container
	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
	}
	//5. test that additions of a localhost resolver are cleaned from
	// host resolv.conf before updating container's resolv.conf copies
	// replace resolv.conf with a localhost-only nameserver copy
	bytesResolvConf = []byte(tmpLocalhostResolvConf)
	if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	// start the container again to pickup changes
	dockerCmd(c, "start", "first")
	// our first exited container ID should have been updated, but with default DNS
	// after the cleanup of resolv.conf found only a localhost nameserver:
	containerResolv, err = readContainerFile(containerID1, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	// NOTE: if we ever change the defaults from google dns, this will break
	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n"
	if !bytes.Equal(containerResolv, []byte(expected)) {
		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
	}
	//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
	// of containers' resolv.conf.
	// Restore the original resolv.conf
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		c.Fatal(err)
	}
	// Run the container so it picks up the old settings
	dockerCmd(c, "run", "--name=third", "busybox", "true")
	containerID3, err := getIDByName("third")
	if err != nil {
		c.Fatal(err)
	}
	// Create a modified resolv.conf.aside and override resolv.conf with it
	// (an atomic rename changes the inode, unlike an in-place write)
	bytesResolvConf = []byte(tmpResolvConf)
	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
		c.Fatal(err)
	}
	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	// start the container again to pickup changes
	dockerCmd(c, "start", "third")
	// check for update in container
	containerResolv, err = readContainerFile(containerID3, "resolv.conf")
	if err != nil {
		c.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
	}
	//cleanup, restore original resolv.conf happens in defer func()
}
// TestRunAddHost checks that --add-host inserts the given host:ip pair
// into the container's /etc/hosts.
// Not applicable on Windows as it does not support --add-host
func (s *DockerSuite) TestRunAddHost(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
	if entry := strings.Trim(out, "\r\n"); entry != "86.75.30.9\textra" {
		c.Fatalf("expected '86.75.30.9\textra', but says: %q", entry)
	}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
	// Attaching only stderr together with -t must not break the run.
	if _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true"); exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
	// Attaching only stdout together with -t must not break the run.
	if _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true"); exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}
// Regression test for #6983
func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
	// Attaching both stdout and stderr together with -t must not break the run.
	if _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true"); exitCode != 0 {
		c.Fatalf("Container should have exited with error code 0")
	}
}
// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
	_, stderr, _, err := runCommandWithStdoutStderr(cmd)
	switch {
	case err == nil:
		c.Fatal("Container should have exited with error code different than 0")
	case !strings.Contains(stderr, "Conflicting options: -a and -d"):
		c.Fatal("Should have been returned an error with conflicting options -a and -d")
	}
}
// TestRunState verifies State.Running and State.Pid transitions across
// run -> stop -> start: running with a nonzero pid, stopped with a
// changed pid value, then running again under a fresh pid.
// TODO Windows: This needs some rework as Windows busybox does not support top
func (s *DockerSuite) TestRunState(c *check.C) {
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	state := inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1 := inspectField(c, id, "State.Pid")
	if pid1 == "0" {
		c.Fatal("Container state Pid 0")
	}
	dockerCmd(c, "stop", id)
	state = inspectField(c, id, "State.Running")
	if state != "false" {
		c.Fatal("Container state is 'running'")
	}
	pid2 := inspectField(c, id, "State.Pid")
	if pid2 == pid1 {
		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
	}
	dockerCmd(c, "start", id)
	state = inspectField(c, id, "State.Running")
	if state != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid3 := inspectField(c, id, "State.Pid")
	if pid3 == pid1 {
		// Fix: the message previously printed pid2 even though pid3 was
		// the value being compared.
		c.Fatalf("Container state Pid %s, but expected %s", pid3, pid1)
	}
}
// Test for #1737
func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) {
	// Not applicable on Windows as it does not support uid or gid in this way
	testRequires(c, DaemonIsLinux)
	name := "testrunvolumesuidgid"
	if _, err := buildImage(name,
		`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`,
		true); err != nil {
		c.Fatal(err)
	}

	// Test that the uid and gid is copied from the image to the volume
	ownership, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
	if ownership = strings.TrimSpace(ownership); ownership != "dockerio:dockerio" {
		c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", ownership)
	}
}
// Test for #1582
func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) {
	// TODO Windows, post TP5. Windows does not yet support volume functionality
	// that copies from the image to the volume.
	testRequires(c, DaemonIsLinux)
	name := "testruncopyvolumecontent"
	if _, err := buildImage(name,
		`FROM busybox
RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
		true); err != nil {
		c.Fatal(err)
	}

	// Test that the content is copied from the image to the volume
	listing, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello")
	if !strings.Contains(listing, "/hello/local/world") || !strings.Contains(listing, "/hello/local") {
		c.Fatal("Container failed to transfer content to volume")
	}
}
// TestRunCleanupCmdOnEntrypoint checks that overriding --entrypoint drops the
// image's CMD, so the override runs with no leftover arguments.
func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
	const name = "testrunmdcleanuponentrypoint"
	if _, err := buildImage(name,
		`FROM busybox
ENTRYPOINT ["echo"]
CMD ["testingpoint"]`,
		true); err != nil {
		c.Fatal(err)
	}

	output, status := dockerCmd(c, "run", "--entrypoint", "whoami", name)
	if status != 0 {
		c.Fatalf("expected exit code 0 received %d, out: %q", status, output)
	}
	output = strings.TrimSpace(output)

	expected := "root"
	if daemonPlatform == "windows" {
		if strings.Contains(WindowsBaseImage, "windowsservercore") {
			expected = `user manager\containeradministrator`
		} else {
			expected = `ContainerAdministrator` // nanoserver
		}
	}
	if output != expected {
		c.Fatalf("Expected output %s, got %q. %s", expected, output, WindowsBaseImage)
	}
}
// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) {
	existingFile := "/bin/cat"
	expected := "not a directory"
	if daemonPlatform == "windows" {
		existingFile = `\windows\system32\ntdll.dll`
		expected = `Cannot mkdir: \windows\system32\ntdll.dll is not a directory.`
	}

	// Pointing -w at a regular file must fail client-side with exit code 125.
	out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox")
	if err == nil || exitCode != 125 || !strings.Contains(out, expected) {
		c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode)
	}
}
// TestRunExitOnStdinClose checks that an interactive (-i) container running
// cat exits once its stdin is closed, and that the daemon then records the
// container as stopped.
func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) {
	name := "testrunexitonstdinclose"

	meow := "/bin/cat"
	delay := 60 // seconds to wait for the container to exit after stdin closes
	if daemonPlatform == "windows" {
		meow = "cat"
	}
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow)

	stdin, err := runCmd.StdinPipe()
	if err != nil {
		c.Fatal(err)
	}
	stdout, err := runCmd.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}
	if err := runCmd.Start(); err != nil {
		c.Fatal(err)
	}

	// Echo one line through cat to prove the pipes are live before closing.
	if _, err := stdin.Write([]byte("hello\n")); err != nil {
		c.Fatal(err)
	}
	r := bufio.NewReader(stdout)
	line, err := r.ReadString('\n')
	if err != nil {
		c.Fatal(err)
	}
	line = strings.TrimSpace(line)
	if line != "hello" {
		// BUG FIX: %q already adds quotes; the old '%q' format rendered the
		// value double-quoted (''hello'').
		c.Fatalf("Output should be 'hello', got %q", line)
	}

	// Closing stdin makes cat hit EOF, which must terminate the container.
	if err := stdin.Close(); err != nil {
		c.Fatal(err)
	}
	finish := make(chan error)
	go func() {
		finish <- runCmd.Wait()
		close(finish)
	}()
	select {
	case err := <-finish:
		c.Assert(err, check.IsNil)
	case <-time.After(time.Duration(delay) * time.Second):
		c.Fatal("docker run failed to exit on stdin close")
	}

	state := inspectField(c, name, "State.Running")
	if state != "false" {
		c.Fatal("Container must be stopped after stdin closing")
	}
}
// Test run -i --restart xxx doesn't hang
func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) {
	name := "test-inter-restart"

	// Feed "exit 11" to an interactive shell running under --restart=always.
	res := icmd.StartCmd(icmd.Cmd{
		Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"},
		Stdin:   bytes.NewBufferString("exit 11"),
	})
	c.Assert(res.Error, checker.IsNil)
	defer func() {
		dockerCmdWithResult("stop", name).Assert(c, icmd.Success)
	}()

	// The client must return the shell's exit code instead of hanging.
	res = icmd.WaitOnCmd(60*time.Second, res)
	c.Assert(res, icmd.Matches, icmd.Expected{ExitCode: 11})
}
// Test for #2267
func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writehosts"

	output, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
	if !strings.Contains(output, "test2267") {
		c.Fatal("/etc/hosts should contain 'test2267'")
	}

	// The write must not show up as a filesystem change on the container.
	diff, _ := dockerCmd(c, "diff", name)
	if len(strings.Trim(diff, "\r\n")) != 0 && !eqToBaseDiff(diff, c) {
		c.Fatal("diff should be empty")
	}
}
// eqToBaseDiff reports whether the given `docker diff` output matches the
// diff produced by a fresh busybox container, i.e. contains no extra changes.
func eqToBaseDiff(out string, c *check.C) bool {
	name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32)
	dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello")
	cID, err := getIDByName(name)
	c.Assert(err, check.IsNil)

	// Sort both diffs so the comparison is order-insensitive.
	baseDiff, _ := dockerCmd(c, "diff", cID)
	base := strings.Split(baseDiff, "\n")
	sort.Strings(base)
	actual := strings.Split(out, "\n")
	sort.Strings(actual)
	return sliceEq(base, actual)
}
// sliceEq reports whether a and b hold the same elements in the same order.
func sliceEq(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// Test for #2267
func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writehostname"

	output, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
	if !strings.Contains(output, "test2267") {
		c.Fatal("/etc/hostname should contain 'test2267'")
	}

	// The write must not show up as a filesystem change on the container.
	diff, _ := dockerCmd(c, "diff", name)
	if len(strings.Trim(diff, "\r\n")) != 0 && !eqToBaseDiff(diff, c) {
		c.Fatal("diff should be empty")
	}
}
// Test for #2267
func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) {
	// Cannot run on Windows as Windows does not support diff.
	testRequires(c, DaemonIsLinux)
	name := "writeresolv"

	output, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
	if !strings.Contains(output, "test2267") {
		c.Fatal("/etc/resolv.conf should contain 'test2267'")
	}

	// The write must not show up as a filesystem change on the container.
	diff, _ := dockerCmd(c, "diff", name)
	if len(strings.Trim(diff, "\r\n")) != 0 && !eqToBaseDiff(diff, c) {
		c.Fatal("diff should be empty")
	}
}
// TestRunWithBadDevice checks that passing a directory to --device is
// rejected with a "not a device node" error.
func (s *DockerSuite) TestRunWithBadDevice(c *check.C) {
	// Cannot run on Windows as Windows does not support --device
	testRequires(c, DaemonIsLinux)
	name := "baddevice"

	out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true")
	if err == nil {
		c.Fatal("Run should fail with bad device")
	}
	expected := `"/etc": not a device node`
	if !strings.Contains(out, expected) {
		c.Fatalf("Output should contain %q, actual out: %q", expected, out)
	}
}
// TestRunEntrypoint checks that --entrypoint replaces the image entrypoint
// and forwards the remaining arguments to it.
func (s *DockerSuite) TestRunEntrypoint(c *check.C) {
	name := "entrypoint"

	// echo -n prints the argument without a trailing newline.
	const expected = "foobar"
	out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar")
	if out != expected {
		c.Fatalf("Output should be %q, actual out: %q", expected, out)
	}
}
// TestRunBindMounts exercises host bind mounts: reading through a read-only
// mount, writing through a read-write mount, rejecting an illegal destination,
// and (Linux only) mounting a single file.
func (s *DockerSuite) TestRunBindMounts(c *check.C) {
testRequires(c, SameHostDaemon)
if daemonPlatform == "linux" {
testRequires(c, DaemonIsLinux, NotUserNamespace)
}
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := ioutil.TempDir("", "docker-test-container")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
// Seed the host directory with a marker file the container should see.
writeFile(path.Join(tmpDir, "touch-me"), "", c)
// TODO Windows: Temporary check - remove once TP5 support is dropped
if daemonPlatform != "windows" || windowsDaemonKV >= 14350 {
// Test reading from a read-only bind mount
out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp")
if !strings.Contains(out, "touch-me") {
c.Fatal("Container failed to read from bind mount")
}
}
// test writing to bind mount
if daemonPlatform == "windows" {
dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla")
} else {
dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
}
// The touch inside the container must be visible on the host side.
readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
// test mounting to an illegal destination directory
_, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
if err == nil {
c.Fatal("Container bind mounted illegal directory")
}
// Windows does not (and likely never will) support mounting a single file
if daemonPlatform != "windows" {
// test mount a file
dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
// The write through the single-file mount must land in the host file.
content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
expected := "yotta"
if content != expected {
c.Fatalf("Output should be %q, actual out: %q", expected, content)
}
}
}
// Ensure that CIDFile gets deleted if it's empty
// Perform this test by making `docker run` fail
func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) {
	// Skip on Windows. Base image on Windows has a CMD set in the image.
	testRequires(c, DaemonIsLinux)

	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	tmpCidFile := path.Join(tmpDir, "cid")

	image := "emptyfs"
	if daemonPlatform == "windows" {
		// Windows can't support an emptyfs image. Just use the regular Windows image
		image = WindowsBaseImage
	}

	// Running an image with no CMD fails before any container ID is written.
	out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image)
	switch {
	case err == nil:
		c.Fatalf("Run without command must fail. out=%s", out)
	case !strings.Contains(out, "No command specified"):
		c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
	}

	if _, err := os.Stat(tmpCidFile); err == nil {
		c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
	}
}
// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
// TestRunCidFile tests that run --cidfile returns the longid
func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		c.Fatal(err)
	}
	tmpCidFile := path.Join(tmpDir, "cid")
	defer os.RemoveAll(tmpDir)

	out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
	id := strings.TrimSpace(out)

	buffer, err := ioutil.ReadFile(tmpCidFile)
	if err != nil {
		c.Fatal(err)
	}
	cid := string(buffer)
	// A full container ID is 64 hex characters long.
	if len(cid) != 64 {
		// BUG FIX: report the cid read from the file (whose length failed the
		// check), not the id captured from stdout.
		c.Fatalf("--cidfile should be a long id, not %q", cid)
	}
	// The file content must match the ID the CLI printed.
	if cid != id {
		c.Fatalf("cid must be equal to %s, got %s", id, cid)
	}
}
// TestRunSetMacAddress checks that the MAC passed via --mac-address is what
// the container actually reports for its interface.
func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
	mac := "12:34:56:78:9a:bc"

	var out string
	if daemonPlatform == "windows" {
		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'")
		mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs
	} else {
		out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
	}

	if actualMac := strings.TrimSpace(out); actualMac != mac {
		c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
	}
}
// TestRunInspectMacAddress checks that inspect reports the MAC that was
// requested at run time.
func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
	// TODO Windows. Network settings are not propagated back to inspect.
	testRequires(c, DaemonIsLinux)
	const mac = "12:34:56:78:9a:bc"

	out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top")
	cID := strings.TrimSpace(out)
	inspectedMac := inspectField(c, cID, "NetworkSettings.Networks.bridge.MacAddress")
	if inspectedMac != mac {
		c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
	}
}
// test docker run use an invalid mac address
func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
	// A MAC address needs six octets; this five-octet value must be rejected.
	out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox")
	//use an invalid mac address should with an error out
	if err == nil || !strings.Contains(out, "is not a valid mac address") {
		// BUG FIX: the failure message was garbled ("should with error out")
		// and omitted the actual output.
		c.Fatalf("run with an invalid --mac-address should error out, got: %q", out)
	}
}
// TestRunDeallocatePortOnMissingIptablesRule checks that deleting a
// container's iptables ACCEPT rule by hand does not stop the daemon from
// releasing the published port, so a later container can re-bind it.
func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
// TODO Windows. Network settings are not propagated back to inspect.
testRequires(c, SameHostDaemon, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
id := strings.TrimSpace(out)
ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress")
// Manually remove the DOCKER-chain ACCEPT rule for this container's IP/port.
iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
out, _, err := runCommandWithOutput(iptCmd)
if err != nil {
c.Fatal(err, out)
}
if err := deleteContainer(id); err != nil {
c.Fatal(err)
}
// Re-binding port 23 must succeed even though the rule was already gone.
dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top")
}
// TestRunPortInUse checks that binding an already-published host port is
// refused with a "port is already allocated" error.
func (s *DockerSuite) TestRunPortInUse(c *check.C) {
	// TODO Windows. The duplicate NAT message returned by Windows will be
	// changing as is currently completely undecipherable. Does need modifying
	// to run sh rather than top though as top isn't in Windows busybox.
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	const port = "1234"
	dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top")

	out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top")
	if err == nil {
		c.Fatalf("Binding on used port must fail")
	}
	if !strings.Contains(out, "port is already allocated") {
		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
	}
}
// https://github.com/docker/docker/issues/12148
func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
	// TODO Windows. -P is not yet supported
	testRequires(c, DaemonIsLinux)

	// allocate a dynamic port to get the most recent
	out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top")
	cID := strings.TrimSpace(out)

	out, _ = dockerCmd(c, "port", cID, "80")
	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
	lastPort, err := strconv.ParseInt(strPort, 10, 64)
	if err != nil {
		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
	}

	// allocate a static port and a dynamic port together, with static port
	// takes the next recent port in dynamic port range.
	dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", lastPort+1), "busybox", "top")
}
// Regression test for #7792
// TestRunMountOrdering verifies that nested bind mounts are applied so that a
// deeper mount (/tmp/foo, /tmp/tmp2/foo) is layered over its parent mount and
// every marker file stays reachable.
func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
// TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently.
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir2)
// Create a temporary tmpfs mounc.
fooDir := filepath.Join(tmpDir, "foo")
if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil {
c.Fatalf("failed to mkdir at %s - %s", fooDir, err)
}
// Drop a marker file into each host directory so the container can check
// that every mount level is visible.
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil {
c.Fatal(err)
}
// Mount parents and children in one run; the ls chain fails the container
// (and the test) if any marker is hidden by a wrongly-ordered mount.
dockerCmd(c, "run",
"-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2),
"-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir),
"busybox:latest", "sh", "-c",
"ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me")
}
// Regression test for https://github.com/docker/docker/issues/8259
func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) {
	// Not applicable on Windows as Windows does not support volumes
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()

	realDir, err := ioutil.TempDir(os.TempDir(), "testlink")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(realDir)

	// Bind-mount via a symlink to the real directory.
	linkPath := os.TempDir() + "/testlink2"
	if err := os.Symlink(realDir, linkPath); err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(linkPath)

	// Create first container
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
	// Create second container with same symlinked path
	// This will fail if the referenced issue is hit with a "Volume exists" error
	dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test")
}
//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) {
	// While Windows supports volumes, it does not support --add-host hence
	// this test is not applicable on Windows.
	testRequires(c, DaemonIsLinux)

	// --dns writes /etc/resolv.conf, which must survive an /etc volume.
	output, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf")
	if !strings.Contains(output, "nameserver 127.0.0.1") {
		c.Fatal("/etc volume mount hides /etc/resolv.conf")
	}

	// -h writes /etc/hostname.
	output, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname")
	if !strings.Contains(output, "test123") {
		c.Fatal("/etc volume mount hides /etc/hostname")
	}

	// --add-host writes /etc/hosts.
	output, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts")
	output = strings.Replace(output, "\n", " ", -1)
	if !strings.Contains(output, "192.168.0.1\ttest") || !strings.Contains(output, "127.0.0.1\tlocalhost") {
		c.Fatal("/etc volume mount hides /etc/hosts")
	}
}
// TestVolumesNoCopyData checks that neither --volumes-from nor a bind mount
// copies image data into the mounted location.
func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
	// TODO Windows (Post RS1). Windows does not support volumes which
	// are pre-populated such as is built in the dockerfile used in this test.
	testRequires(c, DaemonIsLinux)
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
	if _, err := buildImage("dataimage",
		`FROM busybox
RUN ["mkdir", "-p", "/foo"]
RUN ["touch", "/foo/bar"]`,
		true); err != nil {
		c.Fatal(err)
	}
	dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox")

	if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
	}

	tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform)
	if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") {
		c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
	}
}
// TestRunNoOutputFromPullInStdout checks that a failing pull for an unknown
// image leaves stdout completely empty.
func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
	// just run with unknown image
	runCmd := exec.Command(dockerBinary, "run", "asdfsg")
	var captured bytes.Buffer
	runCmd.Stdout = &captured
	if err := runCmd.Run(); err == nil {
		c.Fatal("Run with unknown image should fail")
	}
	if captured.Len() != 0 {
		c.Fatalf("Stdout contains output from pull: %s", &captured)
	}
}
// TestRunVolumesCleanPaths verifies that volume paths are cleaned: a path
// given with a trailing slash must not create a second, duplicate volume
// entry alongside the clean path.
func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
	testRequires(c, SameHostDaemon)
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
	if _, err := buildImage("run_volumes_clean_paths",
		`FROM busybox
VOLUME `+prefix+`/foo/`,
		true); err != nil {
		c.Fatal(err)
	}
	dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")

	// "foo" comes from the image VOLUME (declared with a trailing slash),
	// "bar" from the -v flag (also with a trailing slash); both must behave
	// identically, so check them in one loop instead of four copied stanzas.
	for _, vol := range []string{"foo", "bar"} {
		// The slash-suffixed form must not exist as its own volume entry.
		out, err := inspectMountSourceField("dark_helmet", prefix+slash+vol+slash)
		if err != errMountNotFound {
			c.Fatalf("Found unexpected volume entry for '%s/%s/' in volumes\n%q", prefix, vol, out)
		}
		// The clean form must resolve to a volume under the config path.
		out, err = inspectMountSourceField("dark_helmet", prefix+slash+vol)
		c.Assert(err, check.IsNil)
		if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) {
			c.Fatalf("Volume was not defined for %s/%s\n%q", prefix, vol, out)
		}
	}
}
// Regression test for #3631
func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) {
	// TODO Windows: This should be able to run on Windows if can find an
	// alternate to /dev/zero and /dev/stdout.
	testRequires(c, DaemonIsLinux)
	cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")

	stdout, err := cont.StdoutPipe()
	if err != nil {
		c.Fatal(err)
	}
	if err := cont.Start(); err != nil {
		c.Fatal(err)
	}

	// Drain slowly (10000 bytes every 5ms) to exercise daemon buffering.
	n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
	if err != nil {
		c.Fatal(err)
	}

	// catv renders each NUL byte as two characters, hence the factor of 2.
	expected := 2 * 1024 * 2000
	if n != expected {
		c.Fatalf("Expected %d, got %d", expected, n)
	}
}
// TestRunAllowPortRangeThroughExpose checks that --expose accepts a port
// range and that -P maps every port in that range exactly once.
func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
	// TODO Windows: -P is not currently supported. Also network
	// settings are not propagated back.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top")
	cID := strings.TrimSpace(out)

	var ports nat.PortMap
	if err := json.Unmarshal([]byte(inspectFieldJSON(c, cID, "NetworkSettings.Ports")), &ports); err != nil {
		c.Fatal(err)
	}

	for port, binding := range ports {
		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
		if portnum < 3000 || portnum > 3003 {
			c.Fatalf("Port %d is out of range ", portnum)
		}
		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
			c.Fatalf("Port is not mapped for the port %s", port)
		}
	}
}
// TestRunExposePort checks that --expose rejects an out-of-range port.
func (s *DockerSuite) TestRunExposePort(c *check.C) {
	// 80000 lies above the valid port range and must be rejected.
	out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox")
	c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out"))
	c.Assert(out, checker.Contains, "invalid range format for --expose")
}
// TestRunModeIpcHost compares /proc/self/ns/ipc inside the container against
// the host's: identical with --ipc=host, distinct without it.
func (s *DockerSuite) TestRunModeIpcHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostIpc, err := os.Readlink("/proc/1/ns/ipc")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc")
	if out = strings.Trim(out, "\n"); hostIpc != out {
		c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc")
	if out = strings.Trim(out, "\n"); hostIpc == out {
		c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out)
	}
}
// TestRunModeIpcContainer verifies --ipc=container:<id>: the joining
// container must land in the donor's IPC namespace and therefore see the
// donor's /dev/shm content and its mounted /dev/mqueue.
func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux)
// Donor container writes a marker into /dev/shm and /dev/mqueue, then idles.
out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
id := strings.TrimSpace(out)
state := inspectField(c, id, "State.Running")
if state != "true" {
c.Fatal("Container state is 'not running'")
}
pid1 := inspectField(c, id, "State.Pid")
// Resolve the donor's IPC namespace link via /proc on the host.
parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
if err != nil {
c.Fatal(err)
}
// The joiner's namespace link must match the donor's exactly.
out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc")
out = strings.Trim(out, "\n")
if parentContainerIpc != out {
c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out)
}
// Shared namespace means the donor's shm marker is visible.
catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test")
if catOutput != "test" {
c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput)
}
// check that /dev/mqueue is actually of mqueue type
grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts")
if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") {
c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput)
}
// The donor's mqueue entry must be visible through the shared mount.
lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue")
lsOutput = strings.Trim(lsOutput, "\n")
if lsOutput != "toto" {
c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput)
}
}
// TestRunModeIpcContainerNotExists checks that --ipc=container:<id> with a
// nonexistent donor fails and that the error mentions the requested ID.
func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
	if !strings.Contains(out, "abcd1234") || err == nil {
		// BUG FIX: the failure message was garbled ("should with correct
		// error out") and omitted the actual output.
		c.Fatalf("run IPC from a nonexistent container should error out mentioning the ID, got: %q", out)
	}
}
// TestRunModeIpcContainerNotRunning checks that joining the IPC namespace of
// a created-but-never-started container fails.
func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	out, _ := dockerCmd(c, "create", "busybox")
	donor := strings.TrimSpace(out)
	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", donor), "busybox")
	if err == nil {
		c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err)
	}
}
// TestRunModePIDContainer verifies that --pid=container:<id> places the new
// container in the donor container's PID namespace.
func (s *DockerSuite) TestRunModePIDContainer(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top")
	donor := strings.TrimSpace(out)
	if running := inspectField(c, donor, "State.Running"); running != "true" {
		c.Fatal("Container state is 'not running'")
	}
	pid1 := inspectField(c, donor, "State.Pid")

	// Resolve the donor's PID namespace link via /proc on the host.
	parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", donor), "busybox", "readlink", "/proc/self/ns/pid")
	if out = strings.Trim(out, "\n"); parentContainerPid != out {
		c.Fatalf("PID different with --pid=container:%s %s != %s\n", donor, parentContainerPid, out)
	}
}
// TestRunModePIDContainerNotExists checks that --pid=container:<id> with a
// nonexistent donor fails and that the error mentions the requested ID.
func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, DaemonIsLinux)
	out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top")
	if !strings.Contains(out, "abcd1234") || err == nil {
		// BUG FIX: the failure message was garbled ("should with correct
		// error out") and omitted the actual output.
		c.Fatalf("run PID from a nonexistent container should error out mentioning the ID, got: %q", out)
	}
}
// TestRunModePIDContainerNotRunning checks that joining the PID namespace of
// a created-but-never-started container fails.
func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	out, _ := dockerCmd(c, "create", "busybox")
	donor := strings.TrimSpace(out)
	out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", donor), "busybox")
	if err == nil {
		c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err)
	}
}
// TestRunMountShmMqueueFromHost bind-mounts the host's /dev/shm and
// /dev/mqueue into a container, then checks that writes made inside the
// container are visible on the host and to an --ipc=host sibling.
func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) {
// Not applicable on Windows as uses Unix-specific capabilities
testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)
// The container writes marker files into the host-mounted paths and idles.
dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top")
// Clean the markers off the host filesystem when the test ends.
defer os.Remove("/dev/mqueue/toto")
defer os.Remove("/dev/shm/test")
volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm")
c.Assert(err, checker.IsNil)
if volPath != "/dev/shm" {
c.Fatalf("volumePath should have been /dev/shm, was %s", volPath)
}
// An --ipc=host container shares the host's /dev/shm, so it must see the marker.
out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test")
if out != "test" {
c.Fatalf("Output of /dev/shm/test expected test but found: %s", out)
}
// Check that the mq was created
if _, err := os.Stat("/dev/mqueue/toto"); err != nil {
c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error())
}
}
// TestContainerNetworkMode verifies that --net=container:<id> places the new
// container in the donor container's network namespace.
func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	donor := strings.TrimSpace(out)
	c.Assert(waitRun(donor), check.IsNil)
	pid1 := inspectField(c, donor, "State.Pid")

	// Resolve the donor's network namespace link via /proc on the host.
	parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}

	out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", donor), "busybox", "readlink", "/proc/self/ns/net")
	if out = strings.Trim(out, "\n"); parentContainerNet != out {
		c.Fatalf("NET different with --net=container:%s %s != %s\n", donor, parentContainerNet, out)
	}
}
// TestRunModePIDHost compares /proc/self/ns/pid inside the container against
// the host's: identical with --pid=host, distinct without it.
func (s *DockerSuite) TestRunModePIDHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostPid, err := os.Readlink("/proc/1/ns/pid")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid")
	if out = strings.Trim(out, "\n"); hostPid != out {
		c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid")
	if out = strings.Trim(out, "\n"); hostPid == out {
		c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out)
	}
}
// TestRunModeUTSHost compares /proc/self/ns/uts inside the container against
// the host's, and checks that -h conflicts with --uts=host.
func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	hostUTS, err := os.Readlink("/proc/1/ns/uts")
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
	if out = strings.Trim(out, "\n"); hostUTS != out {
		c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out)
	}

	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts")
	if out = strings.Trim(out, "\n"); hostUTS == out {
		c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out)
	}

	// Setting a hostname is incompatible with sharing the host UTS namespace.
	out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps")
	c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error())
}
// TestRunTLSVerify checks that passing --tlsverify (with either value) turns
// TLS on for a daemon that is not serving TLS, making the command fail.
func (s *DockerSuite) TestRunTLSVerify(c *check.C) {
	// Remote daemons use TLS and this test is not applicable when TLS is required.
	testRequires(c, SameHostDaemon)
	if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 {
		c.Fatalf("Should have worked: %v:\n%v", err, out)
	}

	// Regardless of whether we specify true or false we need to
	// test to make sure tls is turned on if --tlsverify is specified at all
	res := dockerCmdWithResult("--tlsverify=false", "ps")
	res.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"})

	res = dockerCmdWithResult("--tlsverify=true", "ps")
	res.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"})
}
// TestRunPortFromDockerRangeInUse checks that the daemon's port allocator
// skips over a host port that is already bound and still publishes the
// container port successfully.
func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) {
	// TODO Windows. Once moved to libnetwork/CNM, this may be able to be
	// re-instated.
	testRequires(c, DaemonIsLinux)
	// first find allocator current position
	out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")
	id := strings.TrimSpace(out)
	out, _ = dockerCmd(c, "port", id)
	out = strings.TrimSpace(out)
	if out == "" {
		c.Fatal("docker port command output is empty")
	}
	out = strings.Split(out, ":")[1]
	lastPort, err := strconv.Atoi(out)
	if err != nil {
		c.Fatal(err)
	}
	// Occupy the next port the allocator would hand out...
	port := lastPort + 1
	l, err := net.Listen("tcp", ":"+strconv.Itoa(port))
	if err != nil {
		c.Fatal(err)
	}
	defer l.Close()
	// ...and confirm a new container can still get a published port.
	out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top")
	id = strings.TrimSpace(out)
	dockerCmd(c, "port", id)
}
// TestRunTTYWithPipe checks that `docker run -ti` fails with a helpful error
// when stdin is a pipe rather than a real TTY.
func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) {
	errChan := make(chan error)
	go func() {
		defer close(errChan)

		// Attaching a pipe as stdin means the client has no real TTY to use.
		cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true")
		if _, err := cmd.StdinPipe(); err != nil {
			errChan <- err
			return
		}
		expected := "the input device is not a TTY"
		if runtime.GOOS == "windows" {
			expected += ". If you are using mintty, try prefixing the command with 'winpty'"
		}
		if out, _, err := runCommandWithOutput(cmd); err == nil {
			errChan <- fmt.Errorf("run should have failed")
			return
		} else if !strings.Contains(out, expected) {
			errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected)
			return
		}
	}()
	// Bound the wait so a hung client does not hang the whole suite.
	select {
	case err := <-errChan:
		c.Assert(err, check.IsNil)
	case <-time.After(30 * time.Second):
		c.Fatal("container is running but should have failed")
	}
}
// TestRunNonLocalMacAddress checks that a custom --mac-address is visible in
// the container's network configuration on both Linux and Windows.
func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) {
	addr := "00:16:3E:08:00:50"
	args := []string{"run", "--mac-address", addr}
	expected := addr

	if daemonPlatform != "windows" {
		args = append(args, "busybox", "ifconfig")
	} else {
		args = append(args, WindowsBaseImage, "ipconfig", "/all")
		// ipconfig reports MACs in dash-separated upper case.
		expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1)
	}

	if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) {
		c.Fatalf("Output should have contained %q: %s", expected, out)
	}
}
// TestRunNetHost checks that --net=host shares the host's network namespace
// with the container, and that without the flag the namespaces differ.
func (s *DockerSuite) TestRunNetHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostNet, err := os.Readlink("/proc/1/ns/net")
	if err != nil {
		c.Fatal(err)
	}
	out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet != out {
		c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out)
	}
	// Without --net=host the container must get its own network namespace.
	out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet == out {
		c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out)
	}
}
// TestRunNetHostTwiceSameName makes sure a host-networked container name can
// be reused immediately after an identical --rm run has completed.
func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) {
	// TODO Windows. As Windows networking evolves and converges towards
	// CNM, this test may be possible to enable on Windows.
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	// Run the same named --rm container twice; both runs must succeed,
	// proving the first invocation released the name on exit.
	for i := 0; i < 2; i++ {
		dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true")
	}
}
// TestRunNetContainerWhichHost checks that joining the network namespace of a
// host-networked container via --net=container:<name> yields the host's
// network namespace.
func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) {
	// Not applicable on Windows as uses Unix-specific capabilities
	testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace)

	hostNet, err := os.Readlink("/proc/1/ns/net")
	if err != nil {
		c.Fatal(err)
	}

	dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top")

	out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net")
	out = strings.Trim(out, "\n")
	if hostNet != out {
		c.Fatalf("Container should have host network namespace")
	}
}
// TestRunAllowPortRangeThroughPublish checks that publishing a port range
// maps every exposed port in the range to exactly one host binding.
func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) {
	// TODO Windows. This may be possible to enable in the future. However,
	// Windows does not currently support --expose, or populate the network
	// settings seen through inspect.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top")

	id := strings.TrimSpace(out)
	portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports")

	var ports nat.PortMap
	err := json.Unmarshal([]byte(portstr), &ports)
	c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr))
	for port, binding := range ports {
		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
		if portnum < 3000 || portnum > 3003 {
			c.Fatalf("Port %d is out of range ", portnum)
		}
		// Each port must carry exactly one non-empty host binding.
		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
			c.Fatal("Port is not mapped for the port "+port, out)
		}
	}
}
// TestRunSetDefaultRestartPolicy verifies that a container created without an
// explicit --restart flag ends up with the default "no" restart policy.
func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) {
	runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy")
	policy := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name")
	if policy != "no" {
		c.Fatalf("Set default restart policy failed")
	}
}
// TestRunRestartMaxRetries checks that --restart=on-failure:3 restarts a
// failing container exactly three times and records the retry limit in the
// container's HostConfig.
func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) {
	out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false")
	// Windows containers are slower to cycle; allow a longer settle window.
	timeout := 10 * time.Second
	if daemonPlatform == "windows" {
		timeout = 120 * time.Second
	}

	id := strings.TrimSpace(string(out))
	// Wait for the restart loop to finish before inspecting counters.
	if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil {
		c.Fatal(err)
	}

	count := inspectField(c, id, "RestartCount")
	if count != "3" {
		c.Fatalf("Container was restarted %s times, expected %d", count, 3)
	}

	MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount")
	if MaximumRetryCount != "3" {
		c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
	}
}
// TestRunContainerWithWritableRootfs checks that the container root
// filesystem is writable by default (i.e. without --read-only).
func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
	dockerCmd(c, "run", "--rm", "busybox", "touch", "/file")
}
// TestRunContainerWithReadonlyRootfs checks that --read-only makes a set of
// well-known paths unwritable, optionally also under --privileged.
func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
	// Not applicable on Windows which does not support --read-only
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	testPriv := true
	// don't test privileged mode subtest if user namespaces enabled
	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
		testPriv = false
	}
	testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me")
}
// TestPermissionsPtsReadonlyRootfs checks that /dev/pts is still mounted
// read-write inside a --read-only container.
func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) {
	// Not applicable on Windows due to use of Unix specific functionality, plus
	// the use of --read-only which is not supported.
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	// Ensure we have not broken writing /dev/pts
	out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount")
	if status != 0 {
		c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.")
	}
	expected := "type devpts (rw,"
	if !strings.Contains(string(out), expected) {
		c.Fatalf("expected output to contain %s but contains %s", expected, out)
	}
}
// testReadOnlyFile asserts that touching each of filenames inside a
// --read-only container fails with "Read-only file system"; when testPriv is
// set, the same assertion is repeated with --privileged added.
func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) {
	touch := "touch " + strings.Join(filenames, " ")

	// assertReadOnly runs the given docker args and checks that every file
	// produced a read-only error in the combined output.
	assertReadOnly := func(args ...string) {
		out, _, err := dockerCmdWithError(args...)
		c.Assert(err, checker.NotNil)
		for _, f := range filenames {
			expected := "touch: " + f + ": Read-only file system"
			c.Assert(out, checker.Contains, expected)
		}
	}

	assertReadOnly("run", "--read-only", "--rm", "busybox", "sh", "-c", touch)
	if !testPriv {
		return
	}
	assertReadOnly("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch)
}
// TestRunContainerWithReadonlyEtcHostsAndLinkedContainer checks that link
// entries are still written to /etc/hosts when the rootfs is read-only.
func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) {
	// Not applicable on Windows which does not support --link
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top")

	out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts")
	if !strings.Contains(string(out), "testlinked") {
		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
	}
}
// TestRunContainerWithReadonlyRootfsWithDNSFlag checks that --dns updates
// /etc/resolv.conf even when the rootfs is read-only.
func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) {
	// Not applicable on Windows which does not support either --read-only or --dns.
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf")
	if !strings.Contains(string(out), "1.1.1.1") {
		c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
	}
}
// TestRunContainerWithReadonlyRootfsWithAddHostFlag checks that --add-host
// updates /etc/hosts even when the rootfs is read-only.
func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) {
	// Not applicable on Windows which does not support --read-only
	testRequires(c, DaemonIsLinux, UserNamespaceROMount)

	out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts")
	if !strings.Contains(string(out), "testreadonly") {
		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
	}
}
// TestRunVolumesFromRestartAfterRemoved checks that a container using
// --volumes-from can still restart after the source container is removed,
// because the volume references were resolved at create time.
func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) {
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
	runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo")
	runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest")

	// Remove the main volume container and restart the consuming container
	dockerCmd(c, "rm", "-f", "voltest")

	// This should not fail since the volumes-from were already applied
	dockerCmd(c, "restart", "restarter")
}
// run container with --rm should remove container if exit code != 0
// TestRunContainerWithRmFlagExitCodeNotEqualToZero checks that --rm removes
// the container even when the command fails with a non-zero exit code.
func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) {
	name := "flowers"
	out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists")
	if err == nil {
		c.Fatal("Expected docker run to fail", out, err)
	}

	// No containers may remain after the failed --rm run.
	out, err = getAllContainers()
	if err != nil {
		c.Fatal(out, err)
	}

	if out != "" {
		c.Fatal("Expected not to have containers", out)
	}
}
// TestRunContainerWithRmFlagCannotStartContainer checks that --rm removes the
// container even when the command cannot be started at all.
func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) {
	name := "sparkles"
	out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound")
	if err == nil {
		c.Fatal("Expected docker run to fail", out, err)
	}

	// No containers may remain after the failed --rm run.
	out, err = getAllContainers()
	if err != nil {
		c.Fatal(out, err)
	}

	if out != "" {
		c.Fatal("Expected not to have containers", out)
	}
}
// TestRunPIDHostWithChildIsKillable checks that `docker kill` works on a
// --pid=host container whose process has spawned a child.
func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	name := "ibuildthecloud"
	// The sh -c spawns a sleep child in the host PID namespace.
	dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi")

	c.Assert(waitRun(name), check.IsNil)

	errchan := make(chan error)
	go func() {
		if out, _, err := dockerCmdWithError("kill", name); err != nil {
			errchan <- fmt.Errorf("%v:\n%s", err, out)
		}
		close(errchan)
	}()
	// Bound the wait so a hung kill does not hang the whole suite.
	select {
	case err := <-errchan:
		c.Assert(err, check.IsNil)
	case <-time.After(5 * time.Second):
		c.Fatal("Kill container timed out")
	}
}
// TestRunWithTooSmallMemoryLimit checks that a -m value below the 4MB
// minimum is rejected with a clear error.
func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) {
	// TODO Windows. This may be possible to enable once Windows supports
	// memory limits on containers
	testRequires(c, DaemonIsLinux)
	// this memory limit is 1 byte less than the min, which is 4MB
	// https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22
	out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox")
	if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") {
		c.Fatalf("expected run to fail when using too low a memory limit: %q", out)
	}
}
// TestRunWriteToProcAsound checks that an unprivileged container cannot write
// under /proc/asound.
func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	_, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version")
	if err == nil || code == 0 {
		c.Fatal("standard container should not be able to write to /proc/asound")
	}
}
// TestRunReadProcTimer checks that /proc/timer_stats is masked (reads as
// empty) inside a container.
func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats")
	if code != 0 {
		// cat failed — presumably the file does not exist on this kernel,
		// so there is nothing to verify.
		return
	}
	if err != nil {
		c.Fatal(err)
	}
	if strings.Trim(out, "\n ") != "" {
		c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
	}
}
// TestRunReadProcLatency checks that /proc/latency_stats is masked (reads as
// empty) inside a container, when the host kernel provides the file at all.
func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	// some kernels don't have this configured so skip the test if this file is not found
	// on the host running the tests.
	if _, err := os.Stat("/proc/latency_stats"); err != nil {
		c.Skip("kernel doesn't have latency_stats configured")
		return
	}
	out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats")
	if code != 0 {
		// cat failed inside the container — nothing to verify.
		return
	}
	if err != nil {
		c.Fatal(err)
	}
	if strings.Trim(out, "\n ") != "" {
		c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out)
	}
}
// TestRunReadFilteredProc checks that the docker-default AppArmor profile
// blocks opening sensitive /proc files for reading, even with --privileged.
func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)

	testReadPaths := []string{
		"/proc/latency_stats",
		"/proc/timer_stats",
		"/proc/kcore",
	}
	for i, filePath := range testReadPaths {
		name := fmt.Sprintf("procsieve-%d", i)
		shellCmd := fmt.Sprintf("exec 3<%s", filePath)

		out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if exitCode != 0 {
			// NOTE(review): this returns from the whole test on the first
			// denied path, skipping the remaining paths; `continue` looks
			// like the intended behavior — confirm before changing.
			return
		}
		if err != nil {
			c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err)
		}
	}
}
// TestMountIntoProc checks that mounting a volume over a path inside /proc is
// rejected.
func (s *DockerSuite) TestMountIntoProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	_, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true")
	if err == nil || code == 0 {
		c.Fatal("container should not be able to mount into /proc")
	}
}
// TestMountIntoSys checks that mounting a volume over /sys/fs/cgroup is
// permitted (unlike mounts into /proc).
func (s *DockerSuite) TestMountIntoSys(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true")
}
// TestRunUnshareProc checks that unsharing namespaces and (re)mounting /proc
// is blocked by the docker-default AppArmor profile, including in privileged
// containers.
func (s *DockerSuite) TestRunUnshareProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)

	// In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run.
	errChan := make(chan error)

	// Case 1: unshare with --mount-proc must be denied.
	go func() {
		name := "acidburn"
		out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount")
		if err == nil ||
			!(strings.Contains(strings.ToLower(out), "permission denied") ||
				strings.Contains(strings.ToLower(out), "operation not permitted")) {
			errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err)
		} else {
			errChan <- nil
		}
	}()

	// Case 2: mounting a fresh proc after unshare must be denied.
	go func() {
		name := "cereal"
		out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
		if err == nil ||
			!(strings.Contains(strings.ToLower(out), "mount: cannot mount none") ||
				strings.Contains(strings.ToLower(out), "permission denied") ||
				strings.Contains(strings.ToLower(out), "operation not permitted")) {
			errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err)
		} else {
			errChan <- nil
		}
	}()

	/* Ensure still fails if running privileged with the default policy */
	go func() {
		name := "crashoverride"
		out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
		if err == nil ||
			!(strings.Contains(strings.ToLower(out), "mount: cannot mount none") ||
				strings.Contains(strings.ToLower(out), "permission denied") ||
				strings.Contains(strings.ToLower(out), "operation not permitted")) {
			errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err)
		} else {
			errChan <- nil
		}
	}()

	// Collect one result per goroutine; fail on the first error.
	for i := 0; i < 3; i++ {
		err := <-errChan
		if err != nil {
			c.Fatal(err)
		}
	}
}
// TestRunPublishPort checks that an exposed port is NOT published to the host
// unless --publish-all (or -p) is given.
func (s *DockerSuite) TestRunPublishPort(c *check.C) {
	// TODO Windows: This may be possible once Windows moves to libnetwork and CNM
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top")
	out, _ := dockerCmd(c, "port", "test")
	out = strings.Trim(out, "\r\n")
	if out != "" {
		c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out)
	}
}
// Issue #10184.
// TestDevicePermissions checks that a device added with --device and "mrw"
// permissions shows up inside the container with rw access for everyone.
func (s *DockerSuite) TestDevicePermissions(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	const permissions = "crw-rw-rw-"
	out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse")
	if status != 0 {
		c.Fatalf("expected status 0, got %d", status)
	}
	if !strings.HasPrefix(out, permissions) {
		c.Fatalf("output should begin with %q, got %q", permissions, out)
	}
}
// TestRunCapAddCHOWN checks that adding only the CHOWN capability (after
// dropping all) is enough for chown to succeed inside the container.
func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok")

	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
		c.Fatalf("expected output ok received %s", actual)
	}
}
// https://github.com/docker/docker/pull/14498
// TestVolumeFromMixedRWOptions checks that --volumes-from honors the :ro and
// :rw suffixes when inheriting volumes from another container.
func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) {
	prefix, slash := getPrefixAndSlashFromDaemonPlatform()

	dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true")

	// TODO Windows: Temporary check - remove once TP5 support is dropped
	if daemonPlatform != "windows" || windowsDaemonKV >= 14350 {
		dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true")
	}
	dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true")

	if daemonPlatform != "windows" {
		// The :ro inheritance must yield a read-only mount point.
		mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test")
		c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point"))
		if mRO.RW {
			c.Fatalf("Expected RO volume was RW")
		}
	}

	// The :rw inheritance must yield a writable mount point.
	mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test")
	c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point"))
	if !mRW.RW {
		c.Fatalf("Expected RW volume was RO")
	}
}
// TestRunWriteFilteredProc checks that the docker-default AppArmor profile
// blocks opening sensitive /proc files for writing, even with --privileged.
func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace)

	testWritePaths := []string{
		/* modprobe and core_pattern should both be denied by generic
		 * policy of denials for /proc/sys/kernel. These files have been
		 * picked to be checked as they are particularly sensitive to writes */
		"/proc/sys/kernel/modprobe",
		"/proc/sys/kernel/core_pattern",
		"/proc/sysrq-trigger",
		"/proc/kcore",
	}
	for i, filePath := range testWritePaths {
		name := fmt.Sprintf("writeprocsieve-%d", i)

		shellCmd := fmt.Sprintf("exec 3>%s", filePath)
		out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if code != 0 {
			// NOTE(review): this returns from the whole test on the first
			// denied path, skipping the remaining paths; `continue` looks
			// like the intended behavior — confirm before changing.
			return
		}
		if err != nil {
			c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
		}
	}
}
// TestRunNetworkFilesBindMount checks that a host file bind-mounted over the
// container's network files (/etc/resolv.conf, hosts, hostname) is what the
// container actually sees.
func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	expected := "test123"

	filename := createTmpFile(c, expected)
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i])
		if actual != expected {
			c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual)
		}
	}
}
// TestRunNetworkFilesBindMountRO checks that network files bind-mounted with
// the :ro suffix cannot be written to inside the container.
func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode)
		}
	}
}
// TestRunNetworkFilesBindMountROFilesystem checks interaction of bind-mounted
// network files with --read-only: writable mounts stay writable on a
// read-only rootfs, and :ro mounts stay read-only.
func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount)

	filename := createTmpFile(c, "test123")
	defer os.Remove(filename)

	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}

	// Writable bind mounts must remain writable even on a read-only rootfs.
	for i := range nwfiles {
		_, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i])
		if exitCode != 0 {
			c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}

	// :ro bind mounts must stay read-only regardless of the rootfs mode.
	for i := range nwfiles {
		_, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i])
		if err == nil || exitCode == 0 {
			c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode)
		}
	}
}
// TestTrustedRun checks that running a signed image succeeds with content
// trust enabled, and that the tag can afterwards be run with trust disabled
// (proving the tag really was pushed to the registry).
func (s *DockerTrustSuite) TestTrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := s.setupTrustedImage(c, "trusted-run")

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s\n", err, out)
	}

	// The trusted pull performed by run tags the resolved digest, so the
	// output must mention "Tagging". (Message fixed: this is a run check,
	// not a push check.)
	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try untrusted run to ensure we pushed the tag to the registry
	runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	if !strings.Contains(string(out), "Status: Downloaded") {
		c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out)
	}
}
// TestUntrustedRun checks that running an unsigned tag fails when content
// trust is enabled.
func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL)
	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)
	dockerCmd(c, "push", repoName)
	dockerCmd(c, "rmi", repoName)

	// Try trusted run on untrusted tag
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err := runCommandWithOutput(runCmd)
	if err == nil {
		c.Fatalf("Error expected when running trusted run with:\n%s", out)
	}

	if !strings.Contains(string(out), "does not have trust data for") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}
}
// TestRunWhenCertExpired checks content-trust behavior past certificate
// expiry: a trusted run must fail, while --disable-content-trust must still
// work.
func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	c.Skip("Currently changes system time, causing instability")
	repoName := s.setupTrustedImage(c, "trusted-run-expired")

	// Certificates have 10 years of expiration
	elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11)

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		if err == nil {
			// Fixed: err is nil in this branch — the old message formatted the
			// nil error and claimed the run errored, when the defect is that
			// it unexpectedly succeeded.
			c.Fatalf("Expected trusted run in the distant future to fail, but it succeeded:\n%s", out)
		}

		if !strings.Contains(string(out), "could not validate the path to a trusted root") {
			c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out)
		}
	})

	runAtDifferentDate(elevenYearsFromNow, func() {
		// Try run
		runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName)
		s.trustedCmd(runCmd)
		out, _, err := runCommandWithOutput(runCmd)
		if err != nil {
			c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out)
		}

		if !strings.Contains(string(out), "Status: Downloaded") {
			c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out)
		}
	})
}
// TestTrustedRunFromBadTrustServer checks that a client which has cached
// trust data refuses an invalid root rotation served by a replacement
// ("evil") notary server.
func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) {
	// Windows does not support this functionality
	testRequires(c, DaemonIsLinux)
	repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL)
	evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir")
	if err != nil {
		c.Fatalf("Failed to create local temp dir")
	}

	// tag the image and upload it to the private registry
	dockerCmd(c, "tag", "busybox", repoName)

	pushCmd := exec.Command(dockerBinary, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err := runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Try run
	runCmd := exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)
	if err != nil {
		c.Fatalf("Error running trusted run: %s\n%s", err, out)
	}

	// Fixed: this is a run check, not a push check — the old message said
	// "trusted push".
	if !strings.Contains(string(out), "Tagging") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}

	dockerCmd(c, "rmi", repoName)

	// Kill the notary server, start a new "evil" one.
	s.not.Close()
	s.not, err = newTestNotary(c)
	if err != nil {
		c.Fatalf("Restarting notary server failed.")
	}

	// In order to make an evil server, lets re-init a client (with a different trust dir) and push new data.
	// tag an image and upload it to the private registry
	dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName)

	// Push up to the new server
	pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName)
	s.trustedCmd(pushCmd)
	out, _, err = runCommandWithOutput(pushCmd)
	if err != nil {
		c.Fatalf("Error running trusted push: %s\n%s", err, out)
	}
	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}

	// Now, try running with the original client from this new trust server. This should fail because the new root is invalid.
	runCmd = exec.Command(dockerBinary, "run", repoName)
	s.trustedCmd(runCmd)
	out, _, err = runCommandWithOutput(runCmd)

	if err == nil {
		// Fixed: err is nil in this branch — do not format it.
		c.Fatalf("Continuing with cached data even though it's an invalid root rotation:\n%s", out)
	}
	if !strings.Contains(out, "could not rotate trust to a new trusted root") {
		c.Fatalf("Missing expected output on trusted run:\n%s", out)
	}
}
// TestPtraceContainerProcsFromHost checks that the host can read the
// namespace links of a container's process via /proc/<pid>/ns.
func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	c.Assert(waitRun(id), check.IsNil)
	pid1 := inspectField(c, id, "State.Pid")

	// Reading the ns link from the host must succeed.
	_, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}
}
// TestAppArmorDeniesPtrace checks that AppArmor prevents a non-pid-1 process
// from reading pid 1's namespace links inside the container.
func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux)

	// Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace
	// itself, but pid>1 should not be able to trace pid1.
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net")
	if exitCode == 0 {
		c.Fatal("ptrace was not successfully restricted by AppArmor")
	}
}
// TestAppArmorTraceSelf checks that pid 1 can still read its own namespace
// links under AppArmor (self-trace must remain allowed).
func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor)

	_, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net")
	if exitCode != 0 {
		c.Fatal("ptrace of self failed.")
	}
}
// TestAppArmorDeniesChmodProc checks that AppArmor blocks chmod of /proc
// files from inside a container.
func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace)
	_, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo")
	if exitCode == 0 {
		// If our test failed, attempt to repair the host system...
		_, repairExitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo")
		if repairExitCode != 0 {
			c.Log("could not restore the original mode of /proc/cpuinfo")
		}
		// Fixed: the test must fail whenever the first chmod succeeded —
		// previously it only failed when the repair chmod ALSO succeeded,
		// silently passing when the repair failed.
		c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.")
	}
}
// TestRunCapAddSYSTIME checks that dropping all capabilities and re-adding
// SYS_TIME leaves exactly the SYS_TIME bit set in the effective capability
// mask (CapEff == 0x2000000).
func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$")
}
// run create container failed should clean up the container
// TestRunCreateContainerFailedCleanUp checks that a container whose creation
// fails (bad --link target) is not left behind.
func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) {
	// TODO Windows. This may be possible to enable once link is supported
	testRequires(c, DaemonIsLinux)
	name := "unique_name"
	_, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox")
	c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!"))

	// The container must not be inspectable afterwards.
	containerID, err := inspectFieldWithError(name, "Id")
	c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID))
	c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID))
}
// TestRunNamedVolume checks that a named volume persists data across
// containers, whether reattached by name or via --volumes-from.
func (s *DockerSuite) TestRunNamedVolume(c *check.C) {
	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
	testRequires(c, DaemonIsLinux)
	dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar")

	out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")

	out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}
// TestRunWithUlimits checks that a --ulimit value (nofile=42) is applied
// inside the container.
func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")
	if ul := strings.TrimSpace(out); ul != "42" {
		c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
	}
}
// TestRunContainerWithCgroupParent checks that a relative --cgroup-parent is
// reflected in the container's cgroup paths.
func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)

	cgroupParent := "test"
	name := "cgroup-test"

	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", string(out))
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	// Each subsystem path should end with <cgroupParent>/<container id>.
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	for _, path := range cgroupPaths {
		if strings.HasSuffix(path, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}
// TestRunContainerWithCgroupParentAbsPath is the absolute-path variant of
// TestRunContainerWithCgroupParent: the container's cgroup path must end in
// <abs parent>/<id>.
func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	cgroupParent := "/cgroup-parent/test"
	name := "cgroup-test"
	// out is already a string; no string() conversion needed below.
	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", out, err)
	}
	cgroupPaths := parseCgroupPaths(out)
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", out)
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cgroupParent, id)
	found := false
	// Loop variable renamed from "path" to "p" to avoid shadowing the
	// imported path package used just above.
	for _, p := range cgroupPaths {
		if strings.HasSuffix(p, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}
// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /.
func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	// A relative parent full of ../ segments must be sanitized by the daemon
	// down to its final component.
	cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST"
	cleanCgroupParent := "SHOULD_NOT_EXIST"
	name := "cgroup-invalid-test"
	// out is already a string; no string() conversion needed below.
	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		// XXX: This may include a daemon crash.
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", out, err)
	}
	// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
	if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
		c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!")
	}
	cgroupPaths := parseCgroupPaths(out)
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", out)
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cleanCgroupParent, id)
	found := false
	// Loop variable renamed from "path" to "p" to avoid shadowing the
	// imported path package used just above.
	for _, p := range cgroupPaths {
		if strings.HasSuffix(p, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}
// TestRunAbsoluteInvalidCgroupParent checks that a specially-crafted absolute
// cgroup parent doesn't cause Docker to crash or start modifying /.
// (Header previously mis-named the test due to copy-paste.)
func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) {
	// Not applicable on Windows as uses Unix specific functionality
	testRequires(c, DaemonIsLinux)
	cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST"
	cleanCgroupParent := "/SHOULD_NOT_EXIST"
	name := "cgroup-absolute-invalid-test"
	// out is already a string; no string() conversion needed below.
	out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup")
	if err != nil {
		// XXX: This may include a daemon crash.
		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", out, err)
	}
	// We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue.
	if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) {
		c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!")
	}
	cgroupPaths := parseCgroupPaths(out)
	if len(cgroupPaths) == 0 {
		c.Fatalf("unexpected output - %q", out)
	}
	id, err := getIDByName(name)
	c.Assert(err, check.IsNil)
	expectedCgroup := path.Join(cleanCgroupParent, id)
	found := false
	// Loop variable renamed from "path" to "p" to avoid shadowing the
	// imported path package used just above.
	for _, p := range cgroupPaths {
		if strings.HasSuffix(p, expectedCgroup) {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths)
	}
}
// TestRunContainerWithCgroupMountRO verifies that the cgroup filesystem is
// mounted read-only inside an unprivileged container: creating a file under
// /sys/fs/cgroup must fail with a read-only error.
func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) {
// Not applicable on Windows as uses Unix specific functionality
// --read-only + userns has remount issues
testRequires(c, DaemonIsLinux, NotUserNamespace)
filename := "/sys/fs/cgroup/devices/test123"
out, _, err := dockerCmdWithError("run", "busybox", "touch", filename)
if err == nil {
c.Fatal("expected cgroup mount point to be read-only, touch file should fail")
}
expected := "Read-only file system"
if !strings.Contains(out, expected) {
c.Fatalf("expected output from failure to contain %s but contains %s", expected, out)
}
}
// TestRunContainerNetworkModeToSelf checks that a container cannot join its
// own network namespace (--net=container:<itself>) and that the error message
// says so.
func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true")
if err == nil || !strings.Contains(out, "cannot join own network") {
c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out)
}
}
// TestRunContainerNetModeWithDNSMacHosts verifies that --dns, --mac-address
// and --add-host are each rejected when combined with --net=container, with
// the corresponding runconfig conflict error.
func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top")
if err != nil {
c.Fatalf("failed to run container: %v, output: %q", err, out)
}
out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) {
c.Fatalf("run --net=container with --dns should error out")
}
out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) {
c.Fatalf("run --net=container with --mac-address should error out")
}
out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) {
c.Fatalf("run --net=container with --add-host should error out")
}
}
// TestRunContainerNetModeWithExposePort verifies that -p, -P and --expose are
// each rejected when combined with --net=container, with the corresponding
// runconfig conflict error.
func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) {
// Not applicable on Windows which does not support --net=container
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
c.Fatalf("run --net=container with -p should error out")
}
out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) {
c.Fatalf("run --net=container with -P should error out")
}
out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox")
if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) {
c.Fatalf("run --net=container with --expose should error out")
}
}
// TestRunLinkToContainerNetMode checks that --link works against containers
// that are themselves running in another container's network namespace, at
// two levels of nesting. Each dockerCmd fails the test on error.
func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) {
// Not applicable on Windows which does not support --net=container or --link
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top")
dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top")
dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top")
dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top")
dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top")
}
// TestRunLoopbackOnlyExistsWhenNetworkingDisabled verifies that with
// --net=none the container has exactly one "up" IPv4 interface and that it
// is the loopback device.
func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) {
	// TODO Windows: This may be possible to convert.
	testRequires(c, DaemonIsLinux)
	out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")
	// Count non-empty lines; each corresponds to one reported interface.
	interfaces := 0
	for _, line := range strings.Split(out, "\n") {
		if line != "" {
			interfaces++
		}
	}
	if interfaces != 1 {
		c.Fatalf("Wrong interface count in container %d", interfaces)
	}
	if !strings.HasPrefix(out, "1: lo") {
		c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out)
	}
}
// Issue #4681
//
// TestRunLoopbackWhenNetworkDisabled checks that the loopback interface is
// still usable (ping 127.0.0.1 succeeds) when networking is disabled.
func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) {
if daemonPlatform == "windows" {
// Windows ping uses -n for the packet count
dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1")
} else {
dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1")
}
}
// TestRunModeNetContainerHostname checks that a container sharing another
// container's network namespace also shares its hostname.
func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
// Windows does not support --net=container
testRequires(c, DaemonIsLinux, ExecSupport)
dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top")
out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname")
out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
if out1 != out {
c.Fatal("containers with shared net namespace should have same hostname")
}
}
// TestRunNetworkNotInitializedNoneMode checks that a container started with
// --net=none gets no IP address in its network settings.
func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) {
// TODO Windows: Network settings are not currently propagated. This may
// be resolved in the future with the move to libnetwork and CNM.
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top")
id := strings.TrimSpace(out)
res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress")
if res != "" {
c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
}
}
// TestTwoContainersInNetHost checks that two containers can run concurrently
// with --net=host and both be stopped cleanly.
func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
	// Not applicable as Windows does not support --net=host
	// Note: NotUserNamespace was previously listed twice; the duplicate
	// requirement has been removed.
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
	dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
	dockerCmd(c, "stop", "first")
	dockerCmd(c, "stop", "second")
}
// TestContainersInUserDefinedNetwork checks that two containers on the same
// user-defined bridge network can reach each other by container name.
func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork")
dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first")
}
// TestContainersInMultipleNetworks checks that containers attached to two
// user-defined networks can reach each other over both, using the
// <name>.<network> DNS form.
func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check connectivity between containers in testnetwork1
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
// Connect containers to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
dockerCmd(c, "network", "connect", "testnetwork2", "second")
// Check connectivity between containers over testnetwork2
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}
// TestContainersNetworkIsolation checks that containers on distinct
// user-defined networks cannot reach each other, that connecting to the
// peer's network restores connectivity, and that disconnecting severs it
// again.
func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run 1 container in testnetwork1 and another in testnetwork2
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check Isolation between containers : ping must fail
_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.NotNil)
// Connect first container to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
// ping must succeed now
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.IsNil)
// Disconnect first container from testnetwork2
dockerCmd(c, "network", "disconnect", "testnetwork2", "first")
// ping must fail again
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second")
c.Assert(err, check.NotNil)
}
// TestNetworkRmWithActiveContainers checks that a network cannot be removed
// while containers are still attached to it — even after one of the two
// containers has been stopped, the other keeps the network busy.
func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Network delete with active containers must fail
_, _, err := dockerCmdWithError("network", "rm", "testnetwork1")
c.Assert(err, check.NotNil)
// "second" is still running, so removal must still fail
dockerCmd(c, "stop", "first")
_, _, err = dockerCmdWithError("network", "rm", "testnetwork1")
c.Assert(err, check.NotNil)
}
// TestContainerRestartInMultipleNetworks checks that a container attached to
// two networks loses connectivity on both while stopped and regains it on
// both after restart.
func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm)
// Create 2 networks using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2")
// Run and connect containers to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Check connectivity between containers in testnetwork1
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
// Connect containers to testnetwork2
dockerCmd(c, "network", "connect", "testnetwork2", "first")
dockerCmd(c, "network", "connect", "testnetwork2", "second")
// Check connectivity between containers over testnetwork2
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
// Stop second container and test ping failures on both networks
dockerCmd(c, "stop", "second")
_, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1")
c.Assert(err, check.NotNil)
_, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2")
c.Assert(err, check.NotNil)
// Start second container and connectivity must be restored on both networks
dockerCmd(c, "start", "second")
dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1")
dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2")
}
// TestContainerWithConflictingHostNetworks checks that a container running
// with --net=host cannot additionally be connected to a user-defined network.
func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
// Run a container with --net=host
dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
_, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
c.Assert(err, check.NotNil)
}
// TestContainerWithConflictingSharedNetwork checks that a container sharing
// another container's network namespace (--net=container:) cannot be
// connected to a user-defined network, and that the expected conflict error
// is reported.
func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Run second container in first container's network namespace
dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second")
c.Assert(err, check.NotNil)
c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error())
}
// TestContainerWithConflictingNoneNetwork checks both directions of the
// "none" network conflict: a --net=none container cannot join a user-defined
// network, and a container on a user-defined network cannot join "none".
func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top")
c.Assert(waitRun("first"), check.IsNil)
// Create a network using bridge driver
dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1")
// Connecting to the user defined network must fail
out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first")
c.Assert(err, check.NotNil)
c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error())
// create a container connected to testnetwork1
dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top")
c.Assert(waitRun("second"), check.IsNil)
// Connect second container to none network. it must fail as well
_, _, err = dockerCmdWithError("network", "connect", "none", "second")
c.Assert(err, check.NotNil)
}
// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited
//
// TestRunStdinBlockedAfterContainerExit starts `docker run -i` with an open
// (never-written) stdin pipe and asserts the client process still exits
// within 30 seconds once the container finishes, rather than blocking on
// stdin forever.
func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) {
cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true")
// Keep the stdin pipe open for the lifetime of the test; closing it would
// mask the original bug.
in, err := cmd.StdinPipe()
c.Assert(err, check.IsNil)
defer in.Close()
stdout := bytes.NewBuffer(nil)
cmd.Stdout = stdout
cmd.Stderr = stdout
c.Assert(cmd.Start(), check.IsNil)
// Wait for the client in a goroutine so we can race it against a timeout.
waitChan := make(chan error)
go func() {
waitChan <- cmd.Wait()
}()
select {
case err := <-waitChan:
c.Assert(err, check.IsNil, check.Commentf(stdout.String()))
case <-time.After(30 * time.Second):
c.Fatal("timeout waiting for command to exit")
}
}
// TestRunWrongCpusetCpusFlagValue checks that an invalid --cpuset-cpus value
// is rejected, either via the daemon error message or via exit code 125.
func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) {
	// TODO Windows: This needs validation (error out) in the daemon.
	testRequires(c, DaemonIsLinux)
	out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true")
	c.Assert(err, check.NotNil)
	expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n"
	// Equivalent to !(contains || exitCode == 125) by De Morgan's law.
	if !strings.Contains(out, expected) && exitCode != 125 {
		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
	}
}
// TestRunWrongCpusetMemsFlagValue checks that an invalid --cpuset-mems value
// is rejected, either via the daemon error message or via exit code 125.
func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) {
	// TODO Windows: This needs validation (error out) in the daemon.
	testRequires(c, DaemonIsLinux)
	out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true")
	c.Assert(err, check.NotNil)
	expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n"
	// Equivalent to !(contains || exitCode == 125) by De Morgan's law.
	if !strings.Contains(out, expected) && exitCode != 125 {
		c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode)
	}
}
// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127'
func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) {
	name := "testNonExecutableCmd"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo")
	_, exitCode, _ := runCommandWithOutput(cmd)
	stateExitCode := findContainerExitCode(c, name)
	// Both the client exit code and the container's recorded State.ExitCode
	// must be 127 (negated form of the original combined condition).
	if exitCode != 127 || !strings.Contains(stateExitCode, "127") {
		c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exitCode, stateExitCode)
	}
}
// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127.
func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) {
	name := "testNonExistingCmd"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo")
	_, exitCode, _ := runCommandWithOutput(cmd)
	stateExitCode := findContainerExitCode(c, name)
	// Both the client exit code and the container's recorded State.ExitCode
	// must be 127 (negated form of the original combined condition).
	if exitCode != 127 || !strings.Contains(stateExitCode, "127") {
		c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exitCode, stateExitCode)
	}
}
// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or
// 127 on Windows. The difference is that in Windows, the container must be started
// as that's when the check is made (and yes, by its design...)
func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) {
	expected := 126
	if daemonPlatform == "windows" {
		expected = 127
	}
	name := "testCmdCannotBeInvoked"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc")
	_, exitCode, _ := runCommandWithOutput(cmd)
	stateExitCode := findContainerExitCode(c, name)
	// Both the client exit code and the recorded State.ExitCode must match
	// the platform-specific expectation (negated form of the original check).
	if exitCode != expected || !strings.Contains(stateExitCode, strconv.Itoa(expected)) {
		c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exitCode, stateExitCode)
	}
}
// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image'
func (s *DockerSuite) TestRunNonExistingImage(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "foo")
	out, exitCode, err := runCommandWithOutput(cmd)
	// Negated form of the original combined success condition.
	if err == nil || exitCode != 125 || !strings.Contains(out, "Unable to find image") {
		c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exitCode, err)
	}
}
// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed
func (s *DockerSuite) TestDockerFails(c *check.C) {
	cmd := exec.Command(dockerBinary, "run", "-foo", "busybox")
	out, exitCode, err := runCommandWithOutput(cmd)
	// Negated form of the original combined success condition.
	if err == nil || exitCode != 125 {
		c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exitCode, err)
	}
}
// TestRunInvalidReference invokes docker run with a bad reference.
// A digest-style reference with a malformed digest ("busybox@foo") must be
// rejected with a non-zero exit code and a parse error message.
func (s *DockerSuite) TestRunInvalidReference(c *check.C) {
out, exit, _ := dockerCmdWithError("run", "busybox@foo")
if exit == 0 {
c.Fatalf("expected non-zero exist code; received %d", exit)
}
if !strings.Contains(out, "Error parsing reference") {
c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out)
}
}
// Test fix for issue #17854
//
// TestRunInitLayerPathOwnership builds an image that chowns /etc to a
// non-root user and verifies the ownership survives into a running
// container (i.e. the init layer does not reset it).
func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) {
// Not applicable on Windows as it does not support Linux uid/gid ownership
testRequires(c, DaemonIsLinux)
name := "testetcfileownership"
_, err := buildImage(name,
`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN chown dockerio:dockerio /etc`,
true)
if err != nil {
c.Fatal(err)
}
// Test that dockerio ownership of /etc is retained at runtime
out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc")
out = strings.TrimSpace(out)
if out != "dockerio:dockerio" {
c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out)
}
}
// TestRunWithOomScoreAdj verifies that --oom-score-adj is applied to the
// container process, as reported by /proc/self/oom_score_adj.
func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) {
	testRequires(c, DaemonIsLinux)
	expected := "642"
	out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj")
	oomScoreAdj := strings.TrimSpace(out)
	// Compare against `expected` rather than a second hard-coded literal so
	// the check cannot drift out of sync with the value passed to the flag.
	if oomScoreAdj != expected {
		c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj)
	}
}
// TestRunWithOomScoreAdjInvalidRange checks that values outside
// [-1000, 1000] for --oom-score-adj are rejected with a range error, on both
// the high and low side.
func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true")
c.Assert(err, check.NotNil)
expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]."
if !strings.Contains(out, expected) {
c.Fatalf("Expected output to contain %q, got %q instead", expected, out)
}
out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true")
c.Assert(err, check.NotNil)
expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]."
if !strings.Contains(out, expected) {
c.Fatalf("Expected output to contain %q, got %q instead", expected, out)
}
}
// TestRunVolumesMountedAsShared checks "shared" bind-mount propagation: a
// bind mount made inside the container under a :shared volume must become
// visible on the host.
func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) {
// Volume propagation is linux only. Also it creates directories for
// bind mounting, so needs to be same host.
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
// Prepare a source directory to bind mount
tmpDir, err := ioutil.TempDir("", "volume-source")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil {
c.Fatal(err)
}
// Convert this directory into a shared mount point so that we do
// not rely on propagation properties of parent mount.
cmd := exec.Command("mount", "--bind", tmpDir, tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1")
// Make sure a bind mount under a shared volume propagated to host.
if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted {
c.Fatalf("Bind mount under shared volume did not propagate to host")
}
mount.Unmount(path.Join(tmpDir, "mnt1"))
}
// TestRunVolumesMountedAsSlave checks "slave" bind-mount propagation: a bind
// mount made on the host under a :slave volume must become visible inside
// the container.
func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) {
// Volume propagation is linux only. Also it creates directories for
// bind mounting, so needs to be same host.
testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
// Prepare a source directory to bind mount
tmpDir, err := ioutil.TempDir("", "volume-source")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil {
c.Fatal(err)
}
// Prepare a source directory with file in it. We will bind mount this
// directory and see if file shows up.
tmpDir2, err := ioutil.TempDir("", "volume-source2")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir2)
if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil {
c.Fatal(err)
}
// Convert this directory into a shared mount point so that we do
// not rely on propagation properties of parent mount.
cmd := exec.Command("mount", "--bind", tmpDir, tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir)
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top")
// Bind mount tmpDir2/ onto tmpDir/mnt1. If mount propagates inside
// container then contents of tmpDir2/slave-testfile should become
// visible at "/volume-dest/mnt1/slave-testfile"
cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1"))
if _, err = runCommand(cmd); err != nil {
c.Fatal(err)
}
out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile")
mount.Unmount(path.Join(tmpDir, "mnt1"))
if out != "Test" {
c.Fatalf("Bind mount under slave volume did not propagate to container")
}
}
// TestRunNamedVolumesMountedAsShared checks that the :shared propagation
// mode is rejected for named volumes (it is only valid for bind mounts).
func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile")
c.Assert(exitCode, checker.Not(checker.Equals), 0)
c.Assert(out, checker.Contains, "invalid mount config")
}
// TestRunNamedVolumeCopyImageData checks that when a named volume is first
// mounted over a non-empty image path, the image's data is copied into the
// volume and remains readable from a later container.
func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) {
testRequires(c, DaemonIsLinux)
testImg := "testvolumecopy"
_, err := buildImage(testImg, `
FROM busybox
RUN mkdir -p /foo && echo hello > /foo/hello
`, true)
c.Assert(err, check.IsNil)
dockerCmd(c, "run", "-v", "foo:/foo", testImg)
out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello")
c.Assert(strings.TrimSpace(out), check.Equals, "hello")
}
// TestRunNamedVolumeNotRemoved checks that an explicitly created named
// volume survives both `run --rm` and `rm -fv` (only the anonymous volume
// may be removed).
func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
dockerCmd(c, "volume", "create", "test")
dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "volume", "inspect", "test")
out, _ := dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "rm", "-fv", "test")
dockerCmd(c, "volume", "inspect", "test")
out, _ = dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
}
// TestRunNamedVolumesFromNotRemoved checks that removing a --volumes-from
// child with -v does not delete the named volume it inherited (only
// anonymous volumes are removed).
func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) {
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
dockerCmd(c, "volume", "create", "test")
dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true")
// Remove the parent so there are not other references to the volumes
dockerCmd(c, "rm", "-f", "parent")
// now remove the child and ensure the named volume (and only the named volume) still exists
dockerCmd(c, "rm", "-fv", "child")
dockerCmd(c, "volume", "inspect", "test")
out, _ := dockerCmd(c, "volume", "ls", "-q")
c.Assert(strings.TrimSpace(out), checker.Equals, "test")
}
// TestRunAttachFailedNoLeak checks that a failed `docker run` (port already
// allocated) does not leak goroutines in the daemon (see #21247).
func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) {
	nroutines, err := getGoroutineNumber()
	c.Assert(err, checker.IsNil)
	runSleepingContainer(c, "--name=test", "-p", "8000:8000")
	// Wait until container is fully up and running
	c.Assert(waitRun("test"), check.IsNil)
	out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true")
	// We will need the following `inspect` to diagnose the issue if test fails (#21247)
	out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test")
	out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail")
	c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2))
	// check for windows error as well
	// TODO Windows Post TP5. Fix the error message string
	// out is already a string; the previous string(out) conversions were redundant.
	c.Assert(strings.Contains(out, "port is already allocated") ||
		strings.Contains(out, "were not connected because a duplicate name exists") ||
		strings.Contains(out, "HNS failed with error : Failed to create endpoint") ||
		strings.Contains(out, "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out))
	dockerCmd(c, "rm", "-f", "test")
	// NGoroutines is not updated right away, so we need to wait before failing
	c.Assert(waitForGoroutines(nroutines), checker.IsNil)
}
// Test for one character directory name case (#20122)
//
// TestRunVolumeWithOneCharacter bind-mounts a host path whose final
// directory name is a single character and checks the mount target exists.
func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) {
testRequires(c, DaemonIsLinux)
out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo")
c.Assert(strings.TrimSpace(out), checker.Equals, "/foo")
}
func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume
	// Build an image whose /foo already contains data to copy.
	_, err := buildImage("volumecopy",
		`FROM busybox
RUN mkdir /foo && echo hello > /foo/bar
CMD cat /foo/bar`,
		true,
	)
	c.Assert(err, checker.IsNil)
	dockerCmd(c, "volume", "create", "test")

	// nocopy must refuse to seed the volume from the image.
	output, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy")
	c.Assert(err, checker.NotNil, check.Commentf(output))

	// Default behavior: non-bind volumes are seeded from image content.
	output, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy")
	c.Assert(strings.TrimSpace(output), checker.Equals, "hello")

	// Explicit copy on an already-populated volume is rejected.
	output, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy")
	c.Assert(err, checker.NotNil, check.Commentf(output))

	// Without an explicit mode a populated volume is still accepted.
	output, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy")
	c.Assert(strings.TrimSpace(output), checker.Equals, "hello")

	// Copy modes make no sense for --volumes-from ...
	dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true")
	output, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true")
	c.Assert(err, checker.NotNil, check.Commentf(output))
	output, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true")
	c.Assert(err, checker.NotNil, check.Commentf(output))

	// ... nor for bind mounts.
	output, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true")
	c.Assert(err, checker.NotNil, check.Commentf(output))
	output, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true")
	c.Assert(err, checker.NotNil, check.Commentf(output))
}
// TestRunTooLongHostname verifies hostname validation on `docker run`:
// an over-long or malformed --hostname is rejected with a descriptive
// error, while well-formed names are accepted. Test case in #21445.
func (s *DockerSuite) TestRunTooLongHostname(c *check.C) {
	hostname1 := "this-is-a-way-too-long-hostname-but-it-should-give-a-nice-error.local"
	out, _, err := dockerCmdWithError("run", "--hostname", hostname1, "busybox", "echo", "test")
	c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
	c.Assert(out, checker.Contains, "invalid hostname format:", check.Commentf("Expected to have 'invalid hostname format:' in the output, get: %s!", out))

	// Additional test cases.
	// Idiom fix: the original used a map[string]string whose values merely
	// duplicated the keys and were never read; a slice states the intent.
	validHostnames := []string{
		"hostname",
		"host-name",
		"hostname123",
		"123hostname",
		"hostname-of-63-bytes-long-should-be-valid-and-without-any-error",
	}
	for _, hostname := range validHostnames {
		dockerCmd(c, "run", "--hostname", hostname, "busybox", "echo", "test")
	}

	// Each malformed hostname must be rejected with its specific message.
	invalidHostnames := map[string]string{
		"^hostname": "invalid hostname format: ^hostname",
		"hostname%": "invalid hostname format: hostname%",
		"host&name": "invalid hostname format: host&name",
		"-hostname": "invalid hostname format: -hostname",
		"host_name": "invalid hostname format: host_name",
		"hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error": "invalid hostname format: hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error",
	}
	for hostname, expectedError := range invalidHostnames {
		out, _, err = dockerCmdWithError("run", "--hostname", hostname, "busybox", "echo", "test")
		c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!"))
		c.Assert(out, checker.Contains, expectedError, check.Commentf("Expected to have '%s' in the output, get: %s!", expectedError, out))
	}
}
// Test case for #21976: --dns/--dns-search/--dns-opt must be honoured
// in host network mode, and a localhost resolver must warn.
func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)

	// A localhost resolver is copied through but must trigger a warning on stderr.
	expectedOutput := "nameserver 127.0.0.1"
	expectedWarning := "Localhost DNS setting"
	out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf")
	c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
	c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr))

	// Each DNS flag must be reflected in resolv.conf on its own.
	expectedOutput = "nameserver 1.2.3.4"
	out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf")
	c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))

	expectedOutput = "search example.com"
	out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf")
	c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))

	expectedOutput = "options timeout:3"
	out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf")
	c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))

	// All three flags combined must appear together. The original repeated
	// three near-identical assert stanzas; a loop removes the duplication.
	out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf")
	for _, expected := range []string{"nameserver 1.2.3.4", "search example.com", "options timeout:3"} {
		c.Assert(out, checker.Contains, expected, check.Commentf("Expected '%s', but got %q", expected, out))
	}
}
// Test case for #21976
func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) {
	testRequires(c, DaemonIsLinux, NotUserNamespace)
	// --add-host entries must land in /etc/hosts even with the host network.
	wanted := "1.2.3.4\textra"
	output, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts")
	c.Assert(output, checker.Contains, wanted, check.Commentf("Expected '%s', but got %q", wanted, output))
}
func (s *DockerSuite) TestRunRmAndWait(c *check.C) {
	// Start a detached --rm container that exits with code 2 after 3 seconds.
	dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2")

	// `docker wait` must still observe the exit code before --rm cleans up.
	output, exitCode, err := dockerCmdWithError("wait", "test")
	c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", output, exitCode))
	c.Assert(output, checker.Equals, "2\n", check.Commentf("exit code: %d", exitCode))
	c.Assert(exitCode, checker.Equals, 0)
}
// Test case for #23498
func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) {
	testRequires(c, DaemonIsLinux)

	imageName := "test-entrypoint"
	dockerfile := `FROM busybox
ADD entrypoint.sh /entrypoint.sh
RUN chmod 755 /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD echo foobar`

	ctx, err := fakeContext(dockerfile, map[string]string{
		"entrypoint.sh": `#!/bin/sh
echo "I am an entrypoint"
exec "$@"`,
	})
	c.Assert(err, check.IsNil)
	defer ctx.Close()

	_, err = buildImageFromContext(imageName, ctx, true)
	c.Assert(err, check.IsNil)

	// --entrypoint= clears the image entrypoint, so the given command runs bare.
	output, _ := dockerCmd(c, "run", "--entrypoint=", "-t", imageName, "echo", "foo")
	c.Assert(strings.TrimSpace(output), check.Equals, "foo")

	// CMD will be reset as well (the same as setting a custom entrypoint)
	_, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", imageName)
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "No command specified")
}
func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) {
	c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil)

	// ulimitsOf reports the configured ulimits of a running container.
	ulimitsOf := func(name string) string {
		inspectOut, inspectErr := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
		c.Assert(inspectErr, checker.IsNil)
		return inspectOut
	}

	// Without an explicit --ulimit the daemon default applies.
	name := "test-A"
	_, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	c.Assert(s.d.waitRun(name), check.IsNil)
	c.Assert(ulimitsOf(name), checker.Contains, "[nofile=65535:65535]")

	// An explicit --ulimit overrides the daemon default.
	name = "test-B"
	_, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	c.Assert(s.d.waitRun(name), check.IsNil)
	c.Assert(ulimitsOf(name), checker.Contains, "[nofile=42:42]")
}
func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) {
	// Snapshot the goroutine count so a leak from the failed run is detectable.
	baseline, err := getGoroutineNumber()
	c.Assert(err, checker.IsNil)

	// splunk is not configured here, so driver initialisation must fail.
	output, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true")
	c.Assert(err, checker.NotNil)
	c.Assert(output, checker.Contains, "Failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", output))

	// NGoroutines is not updated right away, so we need to wait before failing
	c.Assert(waitForGoroutines(baseline), checker.IsNil)
}
// Handles error conditions for --credentialspec. Validating E2E success cases
// requires additional infrastructure (AD for example) on CI servers.
func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) {
	testRequires(c, DaemonIsWindows)

	// Each malformed credential spec must be rejected with its own message.
	cases := []struct{ value, expectedError string }{
		{"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"},
		{"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"},
		{"file://", "no value supplied for file:// credential spec security option"},
		{"registry://", "no value supplied for registry:// credential spec security option"},
		{`file://c:\blah.txt`, "path cannot be absolute"},
		{`file://doesnotexist.txt`, "The system cannot find the file specified"},
	}
	for _, tc := range cases {
		_, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+tc.value, "busybox", "true")
		c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", tc.value))
		c.Assert(err.Error(), checker.Contains, tc.expectedError, check.Commentf("%s expected %s got %s", tc.value, tc.expectedError, err))
	}
}
// Windows specific test to validate credential specs with a well-formed spec.
// Note it won't actually do anything in CI configuration with the spec, but
// it should not fail to run a container.
func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) {
	testRequires(c, DaemonIsWindows, SameHostDaemon)
	// Stage a known-good spec where the daemon expects to find it, then run.
	spec := readFile(`fixtures\credentialspecs\valid.json`, c)
	writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), spec, c)
	dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true")
}
// Windows specific test to ensure that a servicing app container is started
// if necessary once a container exits. It does this by forcing a no-op
// servicing event and verifying the event from Hyper-V-Compute
func (s *DockerSuite) TestRunServicingContainer(c *check.C) {
	testRequires(c, DaemonIsWindows, SameHostDaemon)

	// Force a no-op servicing event and wait for the container to exit.
	runOut, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255")
	containerID := strings.TrimSpace(runOut)
	c.Assert(waitExited(containerID, 60*time.Second), checker.IsNil)

	// Pull the most recent Hyper-V-Compute event and check that it describes
	// a servicing container tied to our container ID.
	psCmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`)
	eventOut, _, err := runCommandWithOutput(psCmd)
	c.Assert(err, checker.IsNil)
	c.Assert(eventOut, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", eventOut))
	c.Assert(eventOut, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", eventOut))
	c.Assert(eventOut, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", eventOut))
}
// TestRunDuplicateMount checks that specifying the same bind mount twice is
// deduplicated: the container runs, sees the host file, and Config.Volumes
// stays null.
func (s *DockerSuite) TestRunDuplicateMount(c *check.C) {
	testRequires(c, SameHostDaemon, DaemonIsLinux)

	tmpFile, err := ioutil.TempFile("", "touch-me")
	c.Assert(err, checker.IsNil)
	// Fix: the original closed the file but never deleted it, leaking a
	// temp file on the host for every run of this test.
	defer os.Remove(tmpFile.Name())
	defer tmpFile.Close()

	data := "touch-me-foo-bar\n"
	if _, err := tmpFile.Write([]byte(data)); err != nil {
		c.Fatal(err)
	}

	name := "test"
	out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /")
	c.Assert(out, checker.Not(checker.Contains), "tmp:")
	c.Assert(out, checker.Contains, data)

	out = inspectFieldJSON(c, name, "Config.Volumes")
	c.Assert(out, checker.Contains, "null")
}
// TestRunMount exercises the --mount flag against equivalent --volume
// spellings: each test case lists flag sets that should behave identically,
// a validity flag, and (for valid cases) a check to run inside the container.
func (s *DockerSuite) TestRunMount(c *check.C) {
	testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)

	// mnt1, mnt2, and testCatFooBar are commonly used in multiple test cases
	tmpDir, err := ioutil.TempDir("", "mount")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	mnt1, mnt2 := path.Join(tmpDir, "mnt1"), path.Join(tmpDir, "mnt2")
	if err := os.Mkdir(mnt1, 0755); err != nil {
		c.Fatal(err)
	}
	if err := os.Mkdir(mnt2, 0755); err != nil {
		c.Fatal(err)
	}
	// Seed each mount source with a marker file so mounts can be identified.
	if err := ioutil.WriteFile(path.Join(mnt1, "test1"), []byte("test1"), 0644); err != nil {
		c.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(mnt2, "test2"), []byte("test2"), 0644); err != nil {
		c.Fatal(err)
	}
	// testCatFooBar verifies mnt1 is visible at /foo and mnt2 at /bar.
	testCatFooBar := func(cName string) error {
		out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1")
		if out != "test1" {
			return fmt.Errorf("%s not mounted on /foo", mnt1)
		}
		out, _ = dockerCmd(c, "exec", cName, "cat", "/bar/test2")
		if out != "test2" {
			return fmt.Errorf("%s not mounted on /bar", mnt2)
		}
		return nil
	}

	type testCase struct {
		// equivalents: each inner slice is one flag set; all sets in a case
		// are expected to produce the same outcome.
		equivalents [][]string
		valid       bool
		// fn should be nil if valid==false
		fn func(cName string) error
	}
	cases := []testCase{
		// bind mounts via --mount and mixed --volume/--mount must all work
		{
			equivalents: [][]string{
				{
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/bar", mnt2),
				},
				{
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2),
				},
				{
					"--volume", fmt.Sprintf("%s:/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2),
				},
			},
			valid: true,
			fn:    testCatFooBar,
		},
		// type=volume with an absolute path as src is invalid
		{
			equivalents: [][]string{
				{
					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2),
				},
				{
					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2),
				},
			},
			valid: false,
		},
		// NOTE(review): fn is set here although valid==false, contradicting
		// the comment on the struct field; the loop below never calls fn for
		// invalid cases, so it is dead data — confirm intent.
		{
			equivalents: [][]string{
				{
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2),
				},
				{
					"--volume", fmt.Sprintf("%s:/foo", mnt1),
					"--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2),
				},
			},
			valid: false,
			fn:    testCatFooBar,
		},
		// an anonymous volume stays writable inside a --read-only container
		{
			equivalents: [][]string{
				{
					"--read-only",
					"--mount", "type=volume,dst=/bar",
				},
			},
			valid: true,
			fn: func(cName string) error {
				_, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere")
				return err
			},
		},
		// read-only root + bind mount + writable anonymous volume
		{
			equivalents: [][]string{
				{
					"--read-only",
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", "type=volume,dst=/bar",
				},
				{
					"--read-only",
					"--volume", fmt.Sprintf("%s:/foo", mnt1),
					"--mount", "type=volume,dst=/bar",
				},
			},
			valid: true,
			fn: func(cName string) error {
				out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1")
				if out != "test1" {
					return fmt.Errorf("%s not mounted on /foo", mnt1)
				}
				_, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere")
				return err
			},
		},
		// two mounts targeting the same destination are rejected
		{
			equivalents: [][]string{
				{
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt2),
				},
				{
					"--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2),
				},
				{
					"--volume", fmt.Sprintf("%s:/foo", mnt1),
					"--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2),
				},
			},
			valid: false,
		},
		{
			equivalents: [][]string{
				{
					"--volume", fmt.Sprintf("%s:/foo", mnt1),
					"--mount", fmt.Sprintf("type=volume,src=%s,target=/foo", mnt2),
				},
			},
			valid: false,
		},
		// duplicate anonymous volumes on one target are rejected
		{
			equivalents: [][]string{
				{
					"--mount", "type=volume,target=/foo",
					"--mount", "type=volume,target=/foo",
				},
			},
			valid: false,
		},
	}

	// Run every flag set of every case; valid cases must start and pass fn,
	// invalid ones must fail container creation.
	for i, testCase := range cases {
		for j, opts := range testCase.equivalents {
			cName := fmt.Sprintf("mount-%d-%d", i, j)
			_, _, err := dockerCmdWithError(append([]string{"run", "-i", "-d", "--name", cName},
				append(opts, []string{"busybox", "top"}...)...)...)
			if testCase.valid {
				c.Assert(err, check.IsNil,
					check.Commentf("got error while creating a container with %v (%s)", opts, cName))
				c.Assert(testCase.fn(cName), check.IsNil,
					check.Commentf("got error while executing test for %v (%s)", opts, cName))
				dockerCmd(c, "rm", "-f", cName)
			} else {
				c.Assert(err, checker.NotNil,
					check.Commentf("got nil while creating a container with %v (%s)", opts, cName))
			}
		}
	}
}
func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) {
	testRequires(c, DaemonIsWindows)

	// Run with a CPU-count limit and confirm the container still works.
	output, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing")
	c.Assert(strings.TrimSpace(output), checker.Equals, "testing")

	// The limit must be persisted in the container's HostConfig.
	output = inspectField(c, "test", "HostConfig.CPUCount")
	c.Assert(output, check.Equals, "1")
}
func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) {
	testRequires(c, DaemonIsWindows)

	// Run with a CPU-shares limit and confirm the container still works.
	output, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing")
	c.Assert(strings.TrimSpace(output), checker.Equals, "testing")

	// The limit must be persisted in the container's HostConfig.
	output = inspectField(c, "test", "HostConfig.CPUShares")
	c.Assert(output, check.Equals, "1000")
}
func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) {
	testRequires(c, DaemonIsWindows)

	// Run with a CPU-percent limit and confirm the container still works.
	output, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing")
	c.Assert(strings.TrimSpace(output), checker.Equals, "testing")

	// The limit must be persisted in the container's HostConfig.
	output = inspectField(c, "test", "HostConfig.CPUPercent")
	c.Assert(output, check.Equals, "80")
}
func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) {
	testRequires(c, DaemonIsWindows, IsolationIsProcess)

	output, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing")
	trimmed := strings.TrimSpace(output)

	// On process isolation CPU count wins; shares and percent are dropped
	// with a warning each.
	c.Assert(trimmed, checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded")
	c.Assert(trimmed, checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded")
	c.Assert(trimmed, checker.Contains, "testing")

	// Only the CPU count survives into HostConfig; the others are zeroed.
	output = inspectField(c, "test", "HostConfig.CPUCount")
	c.Assert(output, check.Equals, "1")
	output = inspectField(c, "test", "HostConfig.CPUShares")
	c.Assert(output, check.Equals, "0")
	output = inspectField(c, "test", "HostConfig.CPUPercent")
	c.Assert(output, check.Equals, "0")
}
func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) {
	testRequires(c, DaemonIsWindows, IsolationIsHyperv)

	output, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing")
	c.Assert(strings.TrimSpace(output), checker.Contains, "testing")

	// Under Hyper-V isolation all three limits are kept, no warnings emitted.
	output = inspectField(c, "test", "HostConfig.CPUCount")
	c.Assert(output, check.Equals, "1")
	output = inspectField(c, "test", "HostConfig.CPUShares")
	c.Assert(output, check.Equals, "1000")
	output = inspectField(c, "test", "HostConfig.CPUPercent")
	c.Assert(output, check.Equals, "80")
}
// Test for #25099: every malformed -e value ("", "=", "=foo") must be
// rejected with the same "invalid environment variable" message.
func (s *DockerSuite) TestRunEmptyEnv(c *check.C) {
	testRequires(c, DaemonIsLinux)
	expectedOutput := "invalid environment variable:"

	// The original repeated the identical stanza three times; loop instead.
	for _, env := range []string{"", "=", "=foo"} {
		out, _, err := dockerCmdWithError("run", "-e", env, "busybox", "true")
		c.Assert(err, checker.NotNil)
		c.Assert(out, checker.Contains, expectedOutput)
	}
}
// #28658
func (s *DockerSuite) TestSlowStdinClosing(c *check.C) {
	const attempts = 3 // regression happened 50% of the time
	name := "testslowstdinclosing"
	for i := 0; i < attempts; i++ {
		// Feed the container a stdin that stalls before reporting EOF.
		cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat")
		cmd.Stdin = &delayedReader{}

		errCh := make(chan error, 1)
		go func() {
			_, err := runCommand(cmd)
			errCh <- err
		}()

		select {
		case err := <-errCh:
			c.Assert(err, checker.IsNil)
		case <-time.After(15 * time.Second):
			c.Fatal("running container timed out") // cleanup in teardown
		}
	}
}
// delayedReader simulates a slow stdin: every Read blocks for half a second
// and then reports end-of-input without yielding any data.
type delayedReader struct{}

// Read ignores its buffer, sleeps briefly, and signals EOF with zero bytes.
func (d *delayedReader) Read([]byte) (int, error) {
	const delay = 500 * time.Millisecond
	time.Sleep(delay)
	return 0, io.EOF
}
| [
"\"DOCKER_REMAP_ROOT\""
]
| []
| [
"DOCKER_REMAP_ROOT"
]
| [] | ["DOCKER_REMAP_ROOT"] | go | 1 | 0 | |
test/integration/crcsuite/crcsuite.go | // +build integration
/*
Copyright (C) 2018 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package crcsuite
import (
"crypto/tls"
"fmt"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
"io/ioutil"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
clicumber "github.com/code-ready/clicumber/testsuite"
"github.com/code-ready/crc/pkg/crc/oc"
)
// Package-level state shared by the test-suite steps; populated from
// command-line flags before the suite runs and consumed by FeatureContext
// and the step implementations below.
var (
	CRCHome        string // path to the $HOME/.crc directory under test
	CRCBinary      string // directory holding the crc binary; prepended to PATH
	bundleEmbedded bool   // true when the bundle ships inside the crc binary
	bundleName     string // file name of the bundle in use
	bundleURL      string // bundle location, or the literal "embedded"
	bundleVersion  string // bundle version; required when the bundle is embedded
	pullSecretFile string // path to the pull-secret file (required)
	goPath         string // NOTE(review): not referenced in this chunk — confirm it is set/used elsewhere
)
// FeatureContext defines godog.Suite steps for the test suite.
//
// It registers the CRC-specific step definitions and wires the suite
// lifecycle: BeforeSuite validates flags, puts the crc binary on PATH and
// wipes $HOME/.crc; AfterSuite deletes the CRC VM; BeforeFeature copies
// test data and downloads the bundle when it is not embedded. Fatal setup
// problems terminate the process via os.Exit(1).
func FeatureContext(s *godog.Suite) {
	// CRC related steps
	s.Step(`^removing CRC home directory succeeds$`,
		RemoveCRCHome)
	s.Step(`^starting CRC with default bundle (succeeds|fails)$`,
		StartCRCWithDefaultBundleSucceedsOrFails)
	s.Step(`^starting CRC with default bundle and nameserver "(.*)" (succeeds|fails)$`,
		StartCRCWithDefaultBundleAndNameServerSucceedsOrFails)
	s.Step(`^setting config property "(.*)" to value "(.*)" (succeeds|fails)$`,
		SetConfigPropertyToValueSucceedsOrFails)
	s.Step(`^unsetting config property "(.*)" (succeeds|fails)$`,
		UnsetConfigPropertySucceedsOrFails)
	s.Step(`^login to the oc cluster (succeeds|fails)$`,
		LoginToOcClusterSucceedsOrFails)
	s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" all cluster operators are running$`,
		CheckClusterOperatorsWithRetry)
	s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" http response from "(.*)" has status code "(\d+)"$`,
		CheckHTTPResponseWithRetry)
	s.Step(`^with up to "(\d+)" retries with wait period of "(\d*(?:ms|s|m))" command "(.*)" output (should match|matches|should not match|does not match) "(.*)"$`,
		CheckOutputMatchWithRetry)
	s.Step(`stdout (?:should contain|contains) "(.*)" if bundle (is|is not) embedded$`,
		StdoutContainsIfBundleEmbeddedOrNot)

	// CRC file operations
	s.Step(`^file "([^"]*)" exists in CRC home folder$`,
		FileExistsInCRCHome)
	s.Step(`"(JSON|YAML)" config file "(.*)" in CRC home folder (contains|does not contain) key "(.*)" with value matching "(.*)"$`,
		ConfigFileInCRCHomeContainsKeyMatchingValue)
	s.Step(`"(JSON|YAML)" config file "(.*)" in CRC home folder (contains|does not contain) key "(.*)"$`,
		ConfigFileInCRCHomeContainsKey)
	s.Step(`removing file "(.*)" from CRC home folder succeeds$`,
		DeleteFileFromCRCHome)

	s.BeforeSuite(func() {
		usr, _ := user.Current()
		CRCHome = filepath.Join(usr.HomeDir, ".crc")

		// init CRCBinary if no location provided by user
		if CRCBinary == "" {
			fmt.Println("Expecting the CRC binary to be in $HOME/go/bin.")
			usr, _ := user.Current()
			CRCBinary = filepath.Join(usr.HomeDir, "go", "bin")
		}

		// put CRC binary location on top of PATH
		path := os.Getenv("PATH")
		newPath := fmt.Sprintf("%s%c%s", CRCBinary, os.PathListSeparator, path)
		err := os.Setenv("PATH", newPath)
		if err != nil {
			fmt.Println("Could not put CRC location on top of PATH")
			os.Exit(1)
		}

		// Decide where the bundle comes from: embedded in the binary
		// (bundle name is derived from OS/hypervisor + version) or a URL.
		if bundleURL == "embedded" {
			fmt.Println("Expecting the bundle to be embedded in the CRC binary.")
			bundleEmbedded = true
			if bundleVersion == "" {
				fmt.Println("User must specify --bundle-version if bundle is embedded")
				os.Exit(1)
			}
			// assume default hypervisor
			var hypervisor string
			switch platform := runtime.GOOS; platform {
			case "darwin":
				hypervisor = "hyperkit"
			case "linux":
				hypervisor = "libvirt"
			case "windows":
				hypervisor = "hyperv"
			default:
				fmt.Printf("Unsupported OS: %s", platform)
				os.Exit(1)
			}
			bundleName = fmt.Sprintf("crc_%s_%s.crcbundle", hypervisor, bundleVersion)
		} else {
			bundleEmbedded = false
			_, bundleName = filepath.Split(bundleURL)
		}

		if pullSecretFile == "" {
			fmt.Println("User must specify the pull secret file via --pull-secret-file flag.")
			os.Exit(1)
		}

		// remove $HOME/.crc so every suite run starts from a clean state
		err = RemoveCRCHome()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	})

	s.AfterSuite(func() {
		// Best-effort teardown: a failed delete is logged, not fatal.
		err := DeleteCRC()
		if err != nil {
			fmt.Printf("Could not delete CRC VM: %s.", err)
		}
	})

	s.BeforeFeature(func(this *gherkin.Feature) {
		// copy data/config files to test dir
		CopyFilesToTestDir()

		// For non-embedded bundles, download once and reuse across features.
		if bundleEmbedded == false {
			if _, err := os.Stat(bundleName); os.IsNotExist(err) {
				// Obtain the bundle to current dir
				fmt.Println("Obtaining bundle...")
				bundle, err := DownloadBundle(bundleURL, ".")
				if err != nil {
					fmt.Printf("Failed to obtain CRC bundle, %v\n", err)
					os.Exit(1)
				}
				fmt.Println("Using bundle:", bundle)
			} else if err != nil {
				fmt.Printf("Unexpected error obtaining the bundle %v.\n", bundleName)
				os.Exit(1)
			} else {
				fmt.Println("Using existing bundle:", bundleName)
			}
		}
	})
}
// CheckClusterOperatorsWithRetry polls the cluster-operator status up to
// retryCount times, waiting retryWait (a time.ParseDuration string) between
// polls, and returns nil once the operators report Available.
func CheckClusterOperatorsWithRetry(retryCount int, retryWait string) error {
	retryDuration, err := time.ParseDuration(retryWait)
	if err != nil {
		return err
	}

	ocConfig := oc.UseOCWithConfig("crc")
	for i := 0; i < retryCount; i++ {
		status, err := oc.GetClusterOperatorStatus(ocConfig)
		if err != nil {
			return err
		}
		if status.Available { // idiom fix: was `status.Available == true`
			return nil
		}
		time.Sleep(retryDuration)
	}
	return fmt.Errorf("Some cluster operators are still not running.\n")
}
// CheckHTTPResponseWithRetry polls address until it answers with
// expectedStatusCode, trying up to retryCount times with retryWait between
// attempts. TLS certificate verification is disabled so self-signed test
// endpoints can be probed. It returns nil on a match, and an error on a
// bad duration, a transport failure, or when retries are exhausted.
func CheckHTTPResponseWithRetry(retryCount int, retryWait string, address string, expectedStatusCode int) error {
	retryDuration, err := time.ParseDuration(retryWait)
	if err != nil {
		return err
	}
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // test endpoints use self-signed certs
	}
	client := &http.Client{Transport: tr}

	// Track the last observed status so the final error does not need the
	// response object; the original dereferenced resp after the loop, which
	// panics when retryCount <= 0.
	lastStatus := 0
	for i := 0; i < retryCount; i++ {
		resp, err := client.Get(address)
		if err != nil {
			return err
		}
		// Fix: close the body on every attempt; the original leaked one
		// response body (and its connection) per retry.
		lastStatus = resp.StatusCode
		resp.Body.Close()
		if lastStatus == expectedStatusCode {
			return nil
		}
		time.Sleep(retryDuration)
	}
	return fmt.Errorf("Got %d as Status Code instead of expected %d.", lastStatus, expectedStatusCode)
}
// CheckOutputMatchWithRetry repeatedly runs command (up to retryCount times,
// waiting retryTime between attempts) until its stdout matches — or, when the
// step text in expected contains " not ", stops matching — expectedOutput.
// It returns the last match error when all retries are exhausted.
func CheckOutputMatchWithRetry(retryCount int, retryTime string, command string, expected string, expectedOutput string) error {
	retryDuration, err := time.ParseDuration(retryTime)
	if err != nil {
		return err
	}

	// negated is true for the "should not match" / "does not match" variants.
	negated := strings.Contains(expected, " not ")

	// Idiom fix: the original used snake_case identifiers (match_err,
	// exec_err), which violate Go naming conventions.
	var matchErr error
	for i := 0; i < retryCount; i++ {
		if execErr := clicumber.ExecuteCommand(command); execErr == nil {
			if negated {
				matchErr = clicumber.CommandReturnShouldNotMatch("stdout", expectedOutput)
			} else {
				matchErr = clicumber.CommandReturnShouldMatch("stdout", expectedOutput)
			}
			if matchErr == nil {
				return nil
			}
		}
		time.Sleep(retryDuration)
	}
	return matchErr
}
// DeleteFileFromCRCHome removes fileName from the CRC home directory.
// A file that does not exist is treated as success.
func DeleteFileFromCRCHome(fileName string) error {
	theFile := filepath.Join(CRCHome, fileName)
	if _, err := os.Stat(theFile); os.IsNotExist(err) {
		return nil
	}
	// Fix: the original built an error with fmt.Errorf but discarded it,
	// silently swallowing deletion failures; propagate it to the caller.
	if err := clicumber.DeleteFile(theFile); err != nil {
		return fmt.Errorf("Error deleting file %v", theFile)
	}
	return nil
}
// FileExistsInCRCHome returns nil when fileName exists in the CRC home
// directory, a descriptive error when it is missing, and the raw stat
// error for any other failure.
func FileExistsInCRCHome(fileName string) error {
	location := filepath.Join(CRCHome, fileName)
	_, statErr := os.Stat(location)
	if os.IsNotExist(statErr) {
		return fmt.Errorf("file %s does not exists, error: %v ", location, statErr)
	}
	return statErr
}
// ConfigFileInCRCHomeContainsKeyMatchingValue asserts that keyPath in the
// JSON/YAML config file (relative to CRC home) does / does not have a value
// matching the expectedValue regex, depending on condition.
func ConfigFileInCRCHomeContainsKeyMatchingValue(format string, configFile string, condition string, keyPath string, expectedValue string) error {
	// "current bundle" is a placeholder resolved to the bundle in use.
	if expectedValue == "current bundle" {
		expectedValue = fmt.Sprintf(".*%s", bundleName)
	}

	content, err := clicumber.GetFileContent(filepath.Join(CRCHome, configFile))
	if err != nil {
		return err
	}
	keyValue, err := clicumber.GetConfigKeyValue([]byte(content), format, keyPath)
	if err != nil {
		return err
	}
	matches, err := clicumber.PerformRegexMatch(expectedValue, keyValue)
	if err != nil {
		return err
	}

	switch {
	case condition == "contains" && !matches:
		return fmt.Errorf("For key '%s' config contains unexpected value '%s'", keyPath, keyValue)
	case condition == "does not contain" && matches:
		return fmt.Errorf("For key '%s' config contains value '%s', which it should not contain", keyPath, keyValue)
	}
	return nil
}
func ConfigFileInCRCHomeContainsKey(format string, configFile string, condition string, keyPath string) error {
configPath := filepath.Join(CRCHome, configFile)
config, err := clicumber.GetFileContent(configPath)
if err != nil {
return err
}
keyValue, err := clicumber.GetConfigKeyValue([]byte(config), format, keyPath)
if err != nil {
return err
}
if (condition == "contains") && (keyValue == "<nil>") {
return fmt.Errorf("Config does not contain any value for key %s", keyPath)
} else if (condition == "does not contain") && (keyValue != "<nil>") {
return fmt.Errorf("Config contains key %s with assigned value: %s", keyPath, keyValue)
}
return nil
}
// LoginToOcClusterSucceedsOrFails logs into the running cluster as kubeadmin
// using the password cached next to the bundle, and checks the outcome
// against expected ("succeeds" or "fails").
func LoginToOcClusterSucceedsOrFails(expected string) error {
	bundleDir := strings.Split(bundleName, ".crcbundle")[0]
	credentialsPath := filepath.Join(CRCHome, "cache", bundleDir, "kubeadmin-password")
	password, err := ioutil.ReadFile(credentialsPath)
	if err != nil {
		return err
	}
	loginCmd := fmt.Sprintf("oc login --insecure-skip-tls-verify -u kubeadmin -p %s https://api.crc.testing:6443", password)
	return clicumber.ExecuteCommandSucceedsOrFails(loginCmd, expected)
}
// StartCRCWithDefaultBundleSucceedsOrFails runs `crc start` with the pull
// secret and, for non-embedded bundles, an explicit -b flag, then checks the
// outcome against expected ("succeeds" or "fails").
func StartCRCWithDefaultBundleSucceedsOrFails(expected string) error {
	// With an embedded bundle no -b flag is needed.
	// Idiom fix: was `bundleEmbedded == false`; also dropped the separate
	// empty `var cmd string` declaration.
	var extraBundleArgs string
	if !bundleEmbedded {
		extraBundleArgs = fmt.Sprintf("-b %s", bundleName)
	}
	cmd := fmt.Sprintf("crc start -p '%s' %s --log-level debug", pullSecretFile, extraBundleArgs)
	return clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
}
// StartCRCWithDefaultBundleAndNameServerSucceedsOrFails runs `crc start`
// with a custom nameserver (-n), the pull secret and, for non-embedded
// bundles, an explicit -b flag, then checks the outcome against expected.
func StartCRCWithDefaultBundleAndNameServerSucceedsOrFails(nameserver string, expected string) error {
	// Idiom fix: was `bundleEmbedded == false` plus an empty `var cmd string`.
	var extraBundleArgs string
	if !bundleEmbedded {
		extraBundleArgs = fmt.Sprintf("-b %s", bundleName)
	}
	cmd := fmt.Sprintf("crc start -n %s -p '%s' %s --log-level debug", nameserver, pullSecretFile, extraBundleArgs)
	return clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
}
// StdoutContainsIfBundleEmbeddedOrNot asserts that stdout contains value
// exactly when the step's expectation about embedding ("is" / "is not")
// matches the actual bundleEmbedded state, and does not contain it otherwise.
func StdoutContainsIfBundleEmbeddedOrNot(value string, expected string) error {
	// The original four-way nested if/else reduces to a single equality:
	// containment is expected iff expectation and reality agree.
	expectEmbedded := expected == "is"
	if expectEmbedded == bundleEmbedded {
		return clicumber.CommandReturnShouldContain("stdout", value)
	}
	return clicumber.CommandReturnShouldNotContain("stdout", value)
}
func SetConfigPropertyToValueSucceedsOrFails(property string, value string, expected string) error {
if value == "current bundle" {
if bundleEmbedded {
value = filepath.Join(CRCHome, "cache", bundleName)
} else {
value = bundleName
}
}
cmd := "crc config set " + property + " " + value
err := clicumber.ExecuteCommandSucceedsOrFails(cmd, expected)
return err
}
// UnsetConfigPropertySucceedsOrFails runs `crc config unset` for the given
// property and checks the outcome against expected.
func UnsetConfigPropertySucceedsOrFails(property string, expected string) error {
	return clicumber.ExecuteCommandSucceedsOrFails("crc config unset "+property, expected)
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
modules/setting/setting.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package setting
import (
"encoding/base64"
"io"
"io/ioutil"
"net"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"code.gitea.io/git"
"code.gitea.io/gitea/modules/generate"
"code.gitea.io/gitea/modules/log"
_ "code.gitea.io/gitea/modules/minwinsvc" // import minwinsvc for windows services
"code.gitea.io/gitea/modules/user"
"github.com/Unknwon/com"
_ "github.com/go-macaron/cache/memcache" // memcache plugin for cache
_ "github.com/go-macaron/cache/redis"
_ "github.com/go-macaron/session/couchbase" // couchbase plugin for session store
_ "github.com/go-macaron/session/memcache" // memcache plugin for session store
_ "github.com/go-macaron/session/mysql" // mysql plugin for session store
_ "github.com/go-macaron/session/nodb" // nodb plugin for session store
_ "github.com/go-macaron/session/postgres" // postgres plugin for session store
_ "github.com/go-macaron/session/redis" // redis plugin for store session
shellquote "github.com/kballard/go-shellquote"
version "github.com/mcuadros/go-version"
ini "gopkg.in/ini.v1"
"strk.kbt.io/projects/go/libravatar"
)
// Scheme describes protocol types
type Scheme string
// enumerates all the scheme types
const (
HTTP Scheme = "http"
HTTPS Scheme = "https"
FCGI Scheme = "fcgi"
UnixSocket Scheme = "unix"
)
// LandingPage describes the default page
type LandingPage string
// enumerates all the landing page types
const (
LandingPageHome LandingPage = "/"
LandingPageExplore LandingPage = "/explore"
LandingPageOrganizations LandingPage = "/explore/organizations"
)
// enumerates all the types of captchas
const (
ImageCaptcha = "image"
ReCaptcha = "recaptcha"
)
// settings
var (
// AppVer settings
AppVer string
AppBuiltWith string
AppName string
AppURL string
AppSubURL string
AppSubURLDepth int // Number of slashes
AppPath string
AppDataPath string
AppWorkPath string
// Server settings
Protocol Scheme
Domain string
HTTPAddr string
HTTPPort string
LocalURL string
RedirectOtherPort bool
PortToRedirect string
OfflineMode bool
DisableRouterLog bool
CertFile string
KeyFile string
StaticRootPath string
EnableGzip bool
LandingPageURL LandingPage
UnixSocketPermission uint32
EnablePprof bool
PprofDataPath string
EnableLetsEncrypt bool
LetsEncryptTOS bool
LetsEncryptDirectory string
LetsEncryptEmail string
SSH = struct {
Disabled bool `ini:"DISABLE_SSH"`
StartBuiltinServer bool `ini:"START_SSH_SERVER"`
BuiltinServerUser string `ini:"BUILTIN_SSH_SERVER_USER"`
Domain string `ini:"SSH_DOMAIN"`
Port int `ini:"SSH_PORT"`
ListenHost string `ini:"SSH_LISTEN_HOST"`
ListenPort int `ini:"SSH_LISTEN_PORT"`
RootPath string `ini:"SSH_ROOT_PATH"`
ServerCiphers []string `ini:"SSH_SERVER_CIPHERS"`
ServerKeyExchanges []string `ini:"SSH_SERVER_KEY_EXCHANGES"`
ServerMACs []string `ini:"SSH_SERVER_MACS"`
KeyTestPath string `ini:"SSH_KEY_TEST_PATH"`
KeygenPath string `ini:"SSH_KEYGEN_PATH"`
AuthorizedKeysBackup bool `ini:"SSH_AUTHORIZED_KEYS_BACKUP"`
MinimumKeySizeCheck bool `ini:"-"`
MinimumKeySizes map[string]int `ini:"-"`
CreateAuthorizedKeysFile bool `ini:"SSH_CREATE_AUTHORIZED_KEYS_FILE"`
ExposeAnonymous bool `ini:"SSH_EXPOSE_ANONYMOUS"`
}{
Disabled: false,
StartBuiltinServer: false,
Domain: "",
Port: 22,
ServerCiphers: []string{"aes128-ctr", "aes192-ctr", "aes256-ctr", "[email protected]", "arcfour256", "arcfour128"},
ServerKeyExchanges: []string{"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1", "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521", "[email protected]"},
ServerMACs: []string{"[email protected]", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96"},
KeygenPath: "ssh-keygen",
}
LFS struct {
StartServer bool `ini:"LFS_START_SERVER"`
ContentPath string `ini:"LFS_CONTENT_PATH"`
JWTSecretBase64 string `ini:"LFS_JWT_SECRET"`
JWTSecretBytes []byte `ini:"-"`
HTTPAuthExpiry time.Duration `ini:"LFS_HTTP_AUTH_EXPIRY"`
}
// Security settings
InstallLock bool
SecretKey string
LogInRememberDays int
CookieUserName string
CookieRememberName string
ReverseProxyAuthUser string
ReverseProxyAuthEmail string
MinPasswordLength int
ImportLocalPaths bool
DisableGitHooks bool
// Database settings
UseSQLite3 bool
UseMySQL bool
UseMSSQL bool
UsePostgreSQL bool
UseTiDB bool
LogSQL bool
DBConnectRetries int
DBConnectBackoff time.Duration
// UI settings
UI = struct {
ExplorePagingNum int
IssuePagingNum int
RepoSearchPagingNum int
FeedMaxCommitNum int
GraphMaxCommitNum int
CodeCommentLines int
ReactionMaxUserNum int
ThemeColorMetaTag string
MaxDisplayFileSize int64
ShowUserEmail bool
DefaultTheme string
Themes []string
Admin struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
} `ini:"ui.admin"`
User struct {
RepoPagingNum int
} `ini:"ui.user"`
Meta struct {
Author string
Description string
Keywords string
} `ini:"ui.meta"`
}{
ExplorePagingNum: 20,
IssuePagingNum: 10,
RepoSearchPagingNum: 10,
FeedMaxCommitNum: 5,
GraphMaxCommitNum: 100,
CodeCommentLines: 4,
ReactionMaxUserNum: 10,
ThemeColorMetaTag: `#6cc644`,
MaxDisplayFileSize: 8388608,
DefaultTheme: `gitea`,
Themes: []string{`gitea`, `arc-green`},
Admin: struct {
UserPagingNum int
RepoPagingNum int
NoticePagingNum int
OrgPagingNum int
}{
UserPagingNum: 50,
RepoPagingNum: 50,
NoticePagingNum: 25,
OrgPagingNum: 50,
},
User: struct {
RepoPagingNum int
}{
RepoPagingNum: 15,
},
Meta: struct {
Author string
Description string
Keywords string
}{
Author: "Gitea - Git with a cup of tea",
Description: "Gitea (Git with a cup of tea) is a painless self-hosted Git service written in Go",
Keywords: "go,git,self-hosted,gitea",
},
}
// Markdown settings
Markdown = struct {
EnableHardLineBreak bool
CustomURLSchemes []string `ini:"CUSTOM_URL_SCHEMES"`
FileExtensions []string
}{
EnableHardLineBreak: false,
FileExtensions: strings.Split(".md,.markdown,.mdown,.mkd", ","),
}
// Admin settings
Admin struct {
DisableRegularOrgCreation bool
}
// Picture settings
AvatarUploadPath string
AvatarMaxWidth int
AvatarMaxHeight int
GravatarSource string
GravatarSourceURL *url.URL
DisableGravatar bool
EnableFederatedAvatar bool
LibravatarService *libravatar.Libravatar
// Log settings
LogLevel string
LogRootPath string
LogModes []string
LogConfigs []string
RedirectMacaronLog bool
// Attachment settings
AttachmentPath string
AttachmentAllowedTypes string
AttachmentMaxSize int64
AttachmentMaxFiles int
AttachmentEnabled bool
// Time settings
TimeFormat string
CSRFCookieName = "_csrf"
// Mirror settings
Mirror struct {
DefaultInterval time.Duration
MinInterval time.Duration
}
// API settings
API = struct {
EnableSwagger bool
MaxResponseItems int
DefaultPagingNum int
DefaultGitTreesPerPage int
}{
EnableSwagger: true,
MaxResponseItems: 50,
DefaultPagingNum: 30,
DefaultGitTreesPerPage: 1000,
}
OAuth2 = struct {
Enable bool
AccessTokenExpirationTime int64
RefreshTokenExpirationTime int64
JWTSecretBytes []byte `ini:"-"`
JWTSecretBase64 string `ini:"JWT_SECRET"`
}{
Enable: true,
AccessTokenExpirationTime: 3600,
RefreshTokenExpirationTime: 730,
}
U2F = struct {
AppID string
TrustedFacets []string
}{}
// Metrics settings
Metrics = struct {
Enabled bool
Token string
}{
Enabled: false,
Token: "",
}
// I18n settings
Langs []string
Names []string
dateLangs map[string]string
// Highlight settings are loaded in modules/template/highlight.go
// Other settings
ShowFooterBranding bool
ShowFooterVersion bool
ShowFooterTemplateLoadTime bool
// Global setting objects
Cfg *ini.File
CustomPath string // Custom directory path
CustomConf string
CustomPID string
ProdMode bool
RunUser string
IsWindows bool
HasRobotsTxt bool
InternalToken string // internal access token
IterateBufferSize int
// UILocation is the location on the UI, so that we can display the time on UI.
// Currently only show the default time.Local, it could be added to app.ini after UI is ready
UILocation = time.Local
)
// DateLang transforms standard language locale name to corresponding value in datetime plugin.
// Unknown locales fall back to "en".
func DateLang(lang string) string {
	if name, ok := dateLangs[lang]; ok {
		return name
	}
	return "en"
}
// getAppPath resolves the absolute path of the running executable, with all
// backslashes normalized to forward slashes.
func getAppPath() (string, error) {
	var appPath string
	if IsWindows && filepath.IsAbs(os.Args[0]) {
		appPath = filepath.Clean(os.Args[0])
	} else {
		resolved, err := exec.LookPath(os.Args[0])
		if err != nil {
			return "", err
		}
		appPath = resolved
	}

	appPath, err := filepath.Abs(appPath)
	if err != nil {
		return "", err
	}

	// path.Dir is deliberately not used downstream because it mishandles
	// Windows paths that start with two slashes, e.g. "//psf/Home/...".
	return strings.Replace(appPath, "\\", "/", -1), nil
}
// getWorkPath determines Gitea's working directory: the GITEA_WORK_DIR
// environment variable when set, otherwise the directory portion of the
// given application path (or the path itself if it contains no slash).
// Backslashes are normalized to forward slashes.
func getWorkPath(appPath string) string {
	workPath := os.Getenv("GITEA_WORK_DIR")
	if workPath == "" {
		workPath = appPath
		if i := strings.LastIndex(appPath, "/"); i != -1 {
			workPath = appPath[:i]
		}
	}
	return strings.Replace(workPath, "\\", "/", -1)
}
// init wires up the process-wide basics before any configuration is loaded:
// OS detection, a default console logger, and the resolved app/work paths.
func init() {
	IsWindows = runtime.GOOS == "windows"
	log.NewLogger(0, "console", `{"level": 0}`)

	appPath, err := getAppPath()
	if err != nil {
		log.Fatal(4, "Failed to get app path: %v", err)
	}
	AppPath = appPath
	AppWorkPath = getWorkPath(AppPath)
}
// forcePathSeparator aborts startup if the given path contains backslashes;
// configured paths must use forward slashes everywhere.
func forcePathSeparator(path string) {
	if strings.ContainsRune(path, '\\') {
		log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
	}
}
// IsRunUserMatchCurrentUser returns false if configured run user does not match
// actual user that runs the app. The first return value is the actual user name.
// This check is ignored under Windows since SSH remote login is not the main
// method to login on Windows.
func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
	if IsWindows {
		return "", true
	}

	actualUser := user.CurrentUsername()
	return actualUser, actualUser == runUser
}
// createPIDFile writes the current process ID to pidPath, creating parent
// directories as needed. Any failure is fatal.
func createPIDFile(pidPath string) {
	if err := os.MkdirAll(filepath.Dir(pidPath), os.ModePerm); err != nil {
		log.Fatal(4, "Failed to create PID folder: %v", err)
	}

	file, err := os.Create(pidPath)
	if err != nil {
		log.Fatal(4, "Failed to create PID file: %v", err)
	}
	defer file.Close()

	if _, err := file.WriteString(strconv.Itoa(os.Getpid())); err != nil {
		log.Fatal(4, "Failed to write PID information: %v", err)
	}
}
// CheckLFSVersion will check lfs version, if not satisfied, then disable it.
func CheckLFSVersion() {
	if !LFS.StartServer {
		return
	}

	// Disable LFS client hooks if installed for the current OS user.
	// Needs at least git v2.1.2.
	binVersion, err := git.BinVersion()
	if err != nil {
		log.Fatal(4, "Error retrieving git version: %v", err)
	}

	if !version.Compare(binVersion, "2.1.2", ">=") {
		LFS.StartServer = false
		log.Error(4, "LFS server support needs at least Git v2.1.2")
		return
	}

	// Neutralize any client-side LFS filters for commands run by the server.
	git.GlobalCommandArgs = append(git.GlobalCommandArgs,
		"-c", "filter.lfs.required=",
		"-c", "filter.lfs.smudge=",
		"-c", "filter.lfs.clean=")
}
// NewContext initializes configuration context.
// NOTE: do not print any log except error.
func NewContext() {
	Cfg = ini.Empty()
	// Resolve the custom directory; relative paths are anchored at AppWorkPath.
	CustomPath = os.Getenv("GITEA_CUSTOM")
	if len(CustomPath) == 0 {
		CustomPath = path.Join(AppWorkPath, "custom")
	} else if !filepath.IsAbs(CustomPath) {
		CustomPath = path.Join(AppWorkPath, CustomPath)
	}
	if len(CustomPID) > 0 {
		createPIDFile(CustomPID)
	}
	// Resolve the custom config file; relative paths are anchored at CustomPath.
	if len(CustomConf) == 0 {
		CustomConf = path.Join(CustomPath, "conf/app.ini")
	} else if !filepath.IsAbs(CustomConf) {
		CustomConf = path.Join(CustomPath, CustomConf)
	}
	// A missing custom config is tolerated (first-run scenario).
	if com.IsFile(CustomConf) {
		if err := Cfg.Append(CustomConf); err != nil {
			log.Fatal(4, "Failed to load custom conf '%s': %v", CustomConf, err)
		}
	} else {
		log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
	}
	Cfg.NameMapper = ini.AllCapsUnderscore
	homeDir, err := com.HomeDir()
	if err != nil {
		log.Fatal(4, "Failed to get home directory: %v", err)
	}
	homeDir = strings.Replace(homeDir, "\\", "/", -1)
	// Logging settings.
	LogLevel = getLogLevel("log", "LEVEL", "Info")
	LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
	forcePathSeparator(LogRootPath)
	RedirectMacaronLog = Cfg.Section("log").Key("REDIRECT_MACARON_LOG").MustBool(false)
	// Server settings: protocol, addresses, and TLS/Let's Encrypt.
	sec := Cfg.Section("server")
	AppName = Cfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea")
	Protocol = HTTP
	if sec.Key("PROTOCOL").String() == "https" {
		Protocol = HTTPS
		CertFile = sec.Key("CERT_FILE").String()
		KeyFile = sec.Key("KEY_FILE").String()
	} else if sec.Key("PROTOCOL").String() == "fcgi" {
		Protocol = FCGI
	} else if sec.Key("PROTOCOL").String() == "unix" {
		Protocol = UnixSocket
		// Permission string is octal (e.g. "666") and must be <= 0777.
		UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
		UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
		if err != nil || UnixSocketPermissionParsed > 0777 {
			log.Fatal(4, "Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
		}
		UnixSocketPermission = uint32(UnixSocketPermissionParsed)
	}
	EnableLetsEncrypt = sec.Key("ENABLE_LETSENCRYPT").MustBool(false)
	LetsEncryptTOS = sec.Key("LETSENCRYPT_ACCEPTTOS").MustBool(false)
	if !LetsEncryptTOS && EnableLetsEncrypt {
		log.Warn("Failed to enable Let's Encrypt due to Let's Encrypt TOS not being accepted")
		EnableLetsEncrypt = false
	}
	LetsEncryptDirectory = sec.Key("LETSENCRYPT_DIRECTORY").MustString("https")
	LetsEncryptEmail = sec.Key("LETSENCRYPT_EMAIL").MustString("")
	Domain = sec.Key("DOMAIN").MustString("localhost")
	HTTPAddr = sec.Key("HTTP_ADDR").MustString("0.0.0.0")
	HTTPPort = sec.Key("HTTP_PORT").MustString("3000")
	// Default ROOT_URL omits the port when it matches the protocol default.
	defaultAppURL := string(Protocol) + "://" + Domain
	if (Protocol == HTTP && HTTPPort != "80") || (Protocol == HTTPS && HTTPPort != "443") {
		defaultAppURL += ":" + HTTPPort
	}
	AppURL = sec.Key("ROOT_URL").MustString(defaultAppURL)
	AppURL = strings.TrimRight(AppURL, "/") + "/"
	// Check if has app suburl.
	// NOTE: `url` shadows the net/url package from here on.
	url, err := url.Parse(AppURL)
	if err != nil {
		log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppURL, err)
	}
	// Suburl should start with '/' and end without '/', such as '/{subpath}'.
	// This value is empty if site does not have sub-url.
	AppSubURL = strings.TrimSuffix(url.Path, "/")
	AppSubURLDepth = strings.Count(AppSubURL, "/")
	// Check if Domain differs from AppURL domain than update it to AppURL's domain
	// TODO: Can be replaced with url.Hostname() when minimal GoLang version is 1.8
	urlHostname := strings.SplitN(url.Host, ":", 2)[0]
	if urlHostname != Domain && net.ParseIP(urlHostname) == nil {
		Domain = urlHostname
	}
	// Derive the default local URL used for internal requests to this server.
	var defaultLocalURL string
	switch Protocol {
	case UnixSocket:
		defaultLocalURL = "http://unix/"
	case FCGI:
		defaultLocalURL = AppURL
	default:
		defaultLocalURL = string(Protocol) + "://"
		if HTTPAddr == "0.0.0.0" {
			defaultLocalURL += "localhost"
		} else {
			defaultLocalURL += HTTPAddr
		}
		defaultLocalURL += ":" + HTTPPort + "/"
	}
	LocalURL = sec.Key("LOCAL_ROOT_URL").MustString(defaultLocalURL)
	RedirectOtherPort = sec.Key("REDIRECT_OTHER_PORT").MustBool(false)
	PortToRedirect = sec.Key("PORT_TO_REDIRECT").MustString("80")
	OfflineMode = sec.Key("OFFLINE_MODE").MustBool()
	DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool()
	StaticRootPath = sec.Key("STATIC_ROOT_PATH").MustString(AppWorkPath)
	AppDataPath = sec.Key("APP_DATA_PATH").MustString(path.Join(AppWorkPath, "data"))
	EnableGzip = sec.Key("ENABLE_GZIP").MustBool()
	EnablePprof = sec.Key("ENABLE_PPROF").MustBool(false)
	PprofDataPath = sec.Key("PPROF_DATA_PATH").MustString(path.Join(AppWorkPath, "data/tmp/pprof"))
	if !filepath.IsAbs(PprofDataPath) {
		PprofDataPath = filepath.Join(AppWorkPath, PprofDataPath)
	}
	switch sec.Key("LANDING_PAGE").MustString("home") {
	case "explore":
		LandingPageURL = LandingPageExplore
	case "organizations":
		LandingPageURL = LandingPageOrganizations
	default:
		LandingPageURL = LandingPageHome
	}
	// SSH settings: defaults first, then values mapped from the [server] section.
	if len(SSH.Domain) == 0 {
		SSH.Domain = Domain
	}
	SSH.RootPath = path.Join(homeDir, ".ssh")
	serverCiphers := sec.Key("SSH_SERVER_CIPHERS").Strings(",")
	if len(serverCiphers) > 0 {
		SSH.ServerCiphers = serverCiphers
	}
	serverKeyExchanges := sec.Key("SSH_SERVER_KEY_EXCHANGES").Strings(",")
	if len(serverKeyExchanges) > 0 {
		SSH.ServerKeyExchanges = serverKeyExchanges
	}
	serverMACs := sec.Key("SSH_SERVER_MACS").Strings(",")
	if len(serverMACs) > 0 {
		SSH.ServerMACs = serverMACs
	}
	SSH.KeyTestPath = os.TempDir()
	if err = Cfg.Section("server").MapTo(&SSH); err != nil {
		log.Fatal(4, "Failed to map SSH settings: %v", err)
	}
	SSH.KeygenPath = sec.Key("SSH_KEYGEN_PATH").MustString("ssh-keygen")
	SSH.Port = sec.Key("SSH_PORT").MustInt(22)
	SSH.ListenPort = sec.Key("SSH_LISTEN_PORT").MustInt(SSH.Port)
	// When disable SSH, start builtin server value is ignored.
	if SSH.Disabled {
		SSH.StartBuiltinServer = false
	}
	// External sshd in use: make sure the key directories exist.
	if !SSH.Disabled && !SSH.StartBuiltinServer {
		if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
			log.Fatal(4, "Failed to create '%s': %v", SSH.RootPath, err)
		} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
			log.Fatal(4, "Failed to create '%s': %v", SSH.KeyTestPath, err)
		}
	}
	SSH.MinimumKeySizeCheck = sec.Key("MINIMUM_KEY_SIZE_CHECK").MustBool()
	SSH.MinimumKeySizes = map[string]int{}
	// A value of -1 for a key type disables its minimum-size check.
	minimumKeySizes := Cfg.Section("ssh.minimum_key_sizes").Keys()
	for _, key := range minimumKeySizes {
		if key.MustInt() != -1 {
			SSH.MinimumKeySizes[strings.ToLower(key.Name())] = key.MustInt()
		}
	}
	SSH.AuthorizedKeysBackup = sec.Key("SSH_AUTHORIZED_KEYS_BACKUP").MustBool(true)
	SSH.CreateAuthorizedKeysFile = sec.Key("SSH_CREATE_AUTHORIZED_KEYS_FILE").MustBool(true)
	SSH.ExposeAnonymous = sec.Key("SSH_EXPOSE_ANONYMOUS").MustBool(false)
	// LFS settings, also mapped from the [server] section.
	sec = Cfg.Section("server")
	if err = sec.MapTo(&LFS); err != nil {
		log.Fatal(4, "Failed to map LFS settings: %v", err)
	}
	LFS.ContentPath = sec.Key("LFS_CONTENT_PATH").MustString(filepath.Join(AppDataPath, "lfs"))
	if !filepath.IsAbs(LFS.ContentPath) {
		LFS.ContentPath = filepath.Join(AppWorkPath, LFS.ContentPath)
	}
	LFS.HTTPAuthExpiry = sec.Key("LFS_HTTP_AUTH_EXPIRY").MustDuration(20 * time.Minute)
	if LFS.StartServer {
		if err := os.MkdirAll(LFS.ContentPath, 0700); err != nil {
			log.Fatal(4, "Failed to create '%s': %v", LFS.ContentPath, err)
		}
		// The JWT secret must decode to exactly 32 bytes; otherwise a fresh
		// one is generated and persisted back to the custom config.
		LFS.JWTSecretBytes = make([]byte, 32)
		n, err := base64.RawURLEncoding.Decode(LFS.JWTSecretBytes, []byte(LFS.JWTSecretBase64))
		if err != nil || n != 32 {
			LFS.JWTSecretBase64, err = generate.NewJwtSecret()
			if err != nil {
				log.Fatal(4, "Error generating JWT Secret for custom config: %v", err)
				return
			}
			// Save secret
			cfg := ini.Empty()
			if com.IsFile(CustomConf) {
				// Keeps custom settings if there is already something.
				if err := cfg.Append(CustomConf); err != nil {
					log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
				}
			}
			cfg.Section("server").Key("LFS_JWT_SECRET").SetValue(LFS.JWTSecretBase64)
			if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
				log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
			}
			if err := cfg.SaveTo(CustomConf); err != nil {
				log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
				return
			}
		}
	}
	// OAuth2 settings: same 32-byte JWT secret handling as LFS above.
	if err = Cfg.Section("oauth2").MapTo(&OAuth2); err != nil {
		log.Fatal(4, "Failed to OAuth2 settings: %v", err)
		return
	}
	if OAuth2.Enable {
		OAuth2.JWTSecretBytes = make([]byte, 32)
		n, err := base64.RawURLEncoding.Decode(OAuth2.JWTSecretBytes, []byte(OAuth2.JWTSecretBase64))
		if err != nil || n != 32 {
			OAuth2.JWTSecretBase64, err = generate.NewJwtSecret()
			if err != nil {
				log.Fatal(4, "error generating JWT secret: %v", err)
				return
			}
			cfg := ini.Empty()
			if com.IsFile(CustomConf) {
				if err := cfg.Append(CustomConf); err != nil {
					log.Error(4, "failed to load custom conf %s: %v", CustomConf, err)
					return
				}
			}
			cfg.Section("oauth2").Key("JWT_SECRET").SetValue(OAuth2.JWTSecretBase64)
			if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
				log.Fatal(4, "failed to create '%s': %v", CustomConf, err)
				return
			}
			if err := cfg.SaveTo(CustomConf); err != nil {
				log.Fatal(4, "error saving generating JWT secret to custom config: %v", err)
				return
			}
		}
	}
	// Security settings.
	sec = Cfg.Section("security")
	InstallLock = sec.Key("INSTALL_LOCK").MustBool(false)
	SecretKey = sec.Key("SECRET_KEY").MustString("!#@FDEWREWR&*(")
	LogInRememberDays = sec.Key("LOGIN_REMEMBER_DAYS").MustInt(7)
	CookieUserName = sec.Key("COOKIE_USERNAME").MustString("gitea_awesome")
	CookieRememberName = sec.Key("COOKIE_REMEMBER_NAME").MustString("gitea_incredible")
	ReverseProxyAuthUser = sec.Key("REVERSE_PROXY_AUTHENTICATION_USER").MustString("X-WEBAUTH-USER")
	ReverseProxyAuthEmail = sec.Key("REVERSE_PROXY_AUTHENTICATION_EMAIL").MustString("X-WEBAUTH-EMAIL")
	MinPasswordLength = sec.Key("MIN_PASSWORD_LENGTH").MustInt(6)
	ImportLocalPaths = sec.Key("IMPORT_LOCAL_PATHS").MustBool(false)
	DisableGitHooks = sec.Key("DISABLE_GIT_HOOKS").MustBool(false)
	InternalToken = loadInternalToken(sec)
	// Database settings.
	IterateBufferSize = Cfg.Section("database").Key("ITERATE_BUFFER_SIZE").MustInt(50)
	LogSQL = Cfg.Section("database").Key("LOG_SQL").MustBool(true)
	DBConnectRetries = Cfg.Section("database").Key("DB_RETRIES").MustInt(10)
	DBConnectBackoff = Cfg.Section("database").Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second)
	// Attachment settings.
	sec = Cfg.Section("attachment")
	AttachmentPath = sec.Key("PATH").MustString(path.Join(AppDataPath, "attachments"))
	if !filepath.IsAbs(AttachmentPath) {
		AttachmentPath = path.Join(AppWorkPath, AttachmentPath)
	}
	AttachmentAllowedTypes = strings.Replace(sec.Key("ALLOWED_TYPES").MustString("image/jpeg,image/png,application/zip,application/gzip"), "|", ",", -1)
	AttachmentMaxSize = sec.Key("MAX_SIZE").MustInt64(4)
	AttachmentMaxFiles = sec.Key("MAX_FILES").MustInt(5)
	AttachmentEnabled = sec.Key("ENABLED").MustBool(true)
	// Time format: either one of the named stdlib layouts, or a custom layout
	// string validated against the Go reference time.
	TimeFormatKey := Cfg.Section("time").Key("FORMAT").MustString("RFC1123")
	TimeFormat = map[string]string{
		"ANSIC":       time.ANSIC,
		"UnixDate":    time.UnixDate,
		"RubyDate":    time.RubyDate,
		"RFC822":      time.RFC822,
		"RFC822Z":     time.RFC822Z,
		"RFC850":      time.RFC850,
		"RFC1123":     time.RFC1123,
		"RFC1123Z":    time.RFC1123Z,
		"RFC3339":     time.RFC3339,
		"RFC3339Nano": time.RFC3339Nano,
		"Kitchen":     time.Kitchen,
		"Stamp":       time.Stamp,
		"StampMilli":  time.StampMilli,
		"StampMicro":  time.StampMicro,
		"StampNano":   time.StampNano,
	}[TimeFormatKey]
	// When the TimeFormatKey does not exist in the previous map e.g.'2006-01-02 15:04:05'
	if len(TimeFormat) == 0 {
		TimeFormat = TimeFormatKey
		TestTimeFormat, _ := time.Parse(TimeFormat, TimeFormat)
		if TestTimeFormat.Format(time.RFC3339) != "2006-01-02T15:04:05Z" {
			log.Fatal(4, "Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
		}
		log.Trace("Custom TimeFormat: %s", TimeFormat)
	}
	RunUser = Cfg.Section("").Key("RUN_USER").MustString(user.CurrentUsername())
	// Does not check run user when the install lock is off.
	if InstallLock {
		currentUser, match := IsRunUserMatchCurrentUser(RunUser)
		if !match {
			log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
		}
	}
	SSH.BuiltinServerUser = Cfg.Section("server").Key("BUILTIN_SSH_SERVER_USER").MustString(RunUser)
	newRepository()
	// Picture/avatar settings.
	sec = Cfg.Section("picture")
	AvatarUploadPath = sec.Key("AVATAR_UPLOAD_PATH").MustString(path.Join(AppDataPath, "avatars"))
	forcePathSeparator(AvatarUploadPath)
	if !filepath.IsAbs(AvatarUploadPath) {
		AvatarUploadPath = path.Join(AppWorkPath, AvatarUploadPath)
	}
	AvatarMaxWidth = sec.Key("AVATAR_MAX_WIDTH").MustInt(4096)
	AvatarMaxHeight = sec.Key("AVATAR_MAX_HEIGHT").MustInt(3072)
	switch source := sec.Key("GRAVATAR_SOURCE").MustString("gravatar"); source {
	case "duoshuo":
		GravatarSource = "http://gravatar.duoshuo.com/avatar/"
	case "gravatar":
		GravatarSource = "https://secure.gravatar.com/avatar/"
	case "libravatar":
		GravatarSource = "https://seccdn.libravatar.org/avatar/"
	default:
		GravatarSource = source
	}
	DisableGravatar = sec.Key("DISABLE_GRAVATAR").MustBool()
	EnableFederatedAvatar = sec.Key("ENABLE_FEDERATED_AVATAR").MustBool(!InstallLock)
	// Offline mode forces all remote avatar sources off.
	if OfflineMode {
		DisableGravatar = true
		EnableFederatedAvatar = false
	}
	if DisableGravatar {
		EnableFederatedAvatar = false
	}
	if EnableFederatedAvatar || !DisableGravatar {
		GravatarSourceURL, err = url.Parse(GravatarSource)
		if err != nil {
			log.Fatal(4, "Failed to parse Gravatar URL(%s): %v",
				GravatarSource, err)
		}
	}
	if EnableFederatedAvatar {
		LibravatarService = libravatar.New()
		if GravatarSourceURL.Scheme == "https" {
			LibravatarService.SetUseHTTPS(true)
			LibravatarService.SetSecureFallbackHost(GravatarSourceURL.Host)
		} else {
			LibravatarService.SetUseHTTPS(false)
			LibravatarService.SetFallbackHost(GravatarSourceURL.Host)
		}
	}
	// Map the remaining struct-backed sections.
	if err = Cfg.Section("ui").MapTo(&UI); err != nil {
		log.Fatal(4, "Failed to map UI settings: %v", err)
	} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
		log.Fatal(4, "Failed to map Markdown settings: %v", err)
	} else if err = Cfg.Section("admin").MapTo(&Admin); err != nil {
		log.Fatal(4, "Fail to map Admin settings: %v", err)
	} else if err = Cfg.Section("api").MapTo(&API); err != nil {
		log.Fatal(4, "Failed to map API settings: %v", err)
	} else if err = Cfg.Section("metrics").MapTo(&Metrics); err != nil {
		log.Fatal(4, "Failed to map Metrics settings: %v", err)
	}
	newCron()
	newGit()
	// Mirror interval settings, with sanity clamps.
	sec = Cfg.Section("mirror")
	Mirror.MinInterval = sec.Key("MIN_INTERVAL").MustDuration(10 * time.Minute)
	Mirror.DefaultInterval = sec.Key("DEFAULT_INTERVAL").MustDuration(8 * time.Hour)
	if Mirror.MinInterval.Minutes() < 1 {
		log.Warn("Mirror.MinInterval is too low")
		Mirror.MinInterval = 1 * time.Minute
	}
	if Mirror.DefaultInterval < Mirror.MinInterval {
		log.Warn("Mirror.DefaultInterval is less than Mirror.MinInterval")
		Mirror.DefaultInterval = time.Hour * 8
	}
	// i18n settings: Langs and Names are parallel lists.
	Langs = Cfg.Section("i18n").Key("LANGS").Strings(",")
	if len(Langs) == 0 {
		Langs = []string{
			"en-US", "zh-CN", "zh-HK", "zh-TW", "de-DE", "fr-FR", "nl-NL", "lv-LV",
			"ru-RU", "uk-UA", "ja-JP", "es-ES", "pt-BR", "pl-PL", "bg-BG", "it-IT",
			"fi-FI", "tr-TR", "cs-CZ", "sr-SP", "sv-SE", "ko-KR"}
	}
	Names = Cfg.Section("i18n").Key("NAMES").Strings(",")
	if len(Names) == 0 {
		Names = []string{"English", "简体中文", "繁體中文(香港)", "繁體中文(台灣)", "Deutsch",
			"français", "Nederlands", "latviešu", "русский", "Українська", "日本語",
			"español", "português do Brasil", "polski", "български", "italiano",
			"suomi", "Türkçe", "čeština", "српски", "svenska", "한국어"}
	}
	dateLangs = Cfg.Section("i18n.datelang").KeysHash()
	ShowFooterBranding = Cfg.Section("other").Key("SHOW_FOOTER_BRANDING").MustBool(false)
	ShowFooterVersion = Cfg.Section("other").Key("SHOW_FOOTER_VERSION").MustBool(true)
	ShowFooterTemplateLoadTime = Cfg.Section("other").Key("SHOW_FOOTER_TEMPLATE_LOAD_TIME").MustBool(true)
	UI.ShowUserEmail = Cfg.Section("ui").Key("SHOW_USER_EMAIL").MustBool(true)
	HasRobotsTxt = com.IsFile(path.Join(CustomPath, "robots.txt"))
	newMarkup()
	// U2F settings; both facets and app ID default to the trimmed AppURL.
	sec = Cfg.Section("U2F")
	U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/")))
	U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/"))
}
// loadInternalToken returns the internal access token. When INTERNAL_TOKEN_URI
// is set, the token is read from (and, if empty, generated into) that
// location; only the "file" scheme is supported. Otherwise it falls back to
// the INTERNAL_TOKEN key via loadOrGenerateInternalToken.
func loadInternalToken(sec *ini.Section) string {
	uri := sec.Key("INTERNAL_TOKEN_URI").String()
	if len(uri) == 0 {
		return loadOrGenerateInternalToken(sec)
	}
	tempURI, err := url.Parse(uri)
	if err != nil {
		log.Fatal(4, "Failed to parse INTERNAL_TOKEN_URI (%s): %v", uri, err)
	}
	switch tempURI.Scheme {
	case "file":
		// NOTE(review): RequestURI() is used as the filesystem path here;
		// for file: URIs this includes any query string — confirm intended.
		fp, err := os.OpenFile(tempURI.RequestURI(), os.O_RDWR, 0600)
		if err != nil {
			log.Fatal(4, "Failed to open InternalTokenURI (%s): %v", uri, err)
		}
		defer fp.Close()
		buf, err := ioutil.ReadAll(fp)
		if err != nil {
			log.Fatal(4, "Failed to read InternalTokenURI (%s): %v", uri, err)
		}
		// No token in the file, generate one and store it.
		if len(buf) == 0 {
			token, err := generate.NewInternalToken()
			if err != nil {
				log.Fatal(4, "Error generate internal token: %v", err)
			}
			if _, err := io.WriteString(fp, token); err != nil {
				log.Fatal(4, "Error writing to InternalTokenURI (%s): %v", uri, err)
			}
			return token
		}
		return string(buf)
	default:
		log.Fatal(4, "Unsupported URI-Scheme %q (INTERNAL_TOKEN_URI = %q)", tempURI.Scheme, uri)
	}
	// Unreachable in practice: log.Fatal above terminates the process.
	return ""
}
// loadOrGenerateInternalToken returns the configured INTERNAL_TOKEN, creating
// a fresh one and persisting it into the custom config when none is set yet.
func loadOrGenerateInternalToken(sec *ini.Section) string {
	token := sec.Key("INTERNAL_TOKEN").String()
	if len(token) > 0 {
		return token
	}

	generated, err := generate.NewInternalToken()
	if err != nil {
		log.Fatal(4, "Error generate internal token: %v", err)
	}
	token = generated

	// Persist the generated token so it survives restarts, keeping any
	// pre-existing custom settings intact.
	cfgSave := ini.Empty()
	if com.IsFile(CustomConf) {
		if err := cfgSave.Append(CustomConf); err != nil {
			log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
		}
	}
	cfgSave.Section("security").Key("INTERNAL_TOKEN").SetValue(token)
	if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
		log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
	}
	if err := cfgSave.SaveTo(CustomConf); err != nil {
		log.Fatal(4, "Error saving generated INTERNAL_TOKEN to custom config: %v", err)
	}
	return token
}
// NewServices initializes the services
// NOTE(review): the call order below appears deliberate (logging before the
// XORM log service, mail before the mail-dependent services) — preserve it.
func NewServices() {
	newService()
	newLogService()
	NewXORMLogService(false)
	newCacheService()
	newSessionService()
	// Mail services: base mailer first, then the notification mailers.
	newMailService()
	newRegisterMailService()
	newNotifyMailService()
	newWebhookService()
	newIndexerService()
}
| [
"\"GITEA_WORK_DIR\"",
"\"GITEA_CUSTOM\""
]
| []
| [
"GITEA_CUSTOM",
"GITEA_WORK_DIR"
]
| [] | ["GITEA_CUSTOM", "GITEA_WORK_DIR"] | go | 2 | 0 | |
mypyc/test/test_external.py | """Test cases that run tests as subprocesses."""
from typing import List
import os
import subprocess
import sys
import unittest
# Repository root, relative to this file's location.
base_dir = os.path.join(os.path.dirname(__file__), '..', '..')


class TestExternal(unittest.TestCase):
    """Tests that build and run external (C-level) test suites in subprocesses."""

    # TODO: Get this to work on Windows.
    # (Or don't. It is probably not a good use of time.)
    @unittest.skipIf(sys.platform.startswith("win"), "rt tests don't work on windows")
    def test_c_unit_test(self) -> None:
        """Run C unit tests in a subprocess."""
        # Build Google Test, the C++ framework we use for testing C code.
        # The source code for Google Test is copied to this repository.
        cppflags = []  # type: List[str]
        env = os.environ.copy()
        if sys.platform == 'darwin':
            cppflags += ['-mmacosx-version-min=10.10', '-stdlib=libc++']
        env['CPPFLAGS'] = ' '.join(cppflags)
        subprocess.check_call(
            ['make', 'libgtest.a'],
            env=env,
            cwd=os.path.join(base_dir, 'mypyc', 'external', 'googletest', 'make'))

        # Build Python wrapper for C unit tests.
        # check_call raises CalledProcessError on failure, so its (always-zero)
        # return value does not need to be captured.
        env = os.environ.copy()
        env['CPPFLAGS'] = ' '.join(cppflags)
        subprocess.check_call(
            [sys.executable, 'setup.py', 'build_ext', '--inplace'],
            env=env,
            cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))

        # Run C unit tests.
        env = os.environ.copy()
        if 'GTEST_COLOR' not in os.environ:
            env['GTEST_COLOR'] = 'yes'  # Use fancy colors
        status = subprocess.call(
            [sys.executable, '-c',
             'import sys, test_capi; sys.exit(test_capi.run_tests())'],
            env=env,
            cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))
        if status != 0:
            raise AssertionError("make test: C unit test failure")
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/nuctl/command/deploy.go | /*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/nuclio/nuclio/pkg/common"
"github.com/nuclio/nuclio/pkg/functionconfig"
nuctlcommon "github.com/nuclio/nuclio/pkg/nuctl/command/common"
"github.com/nuclio/nuclio/pkg/platform"
"github.com/nuclio/nuclio/pkg/platform/abstract"
"github.com/nuclio/errors"
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// deployCommandeer wires up the `nuctl deploy` command: it holds the cobra
// command, the parsed CLI flag values, and the function configuration that
// is assembled from them before deployment.
type deployCommandeer struct {
	// command wiring
	cmd *cobra.Command
	rootCommandeer *RootCommandeer
	// function configuration assembled from flags and/or a config file
	functionConfig functionconfig.Config
	functionBuild functionconfig.Build
	// function identity and source of configuration
	functionName string
	functionConfigPath string
	// plain deployment options
	description string
	disable bool
	publish bool
	handler string
	runtime string
	image string
	targetCPU int
	runRegistry string
	readinessTimeoutSeconds int
	// repeatable flags (collected into slices)
	volumes stringSliceFlag
	commands stringSliceFlag
	// values passed on the command line as encoded (JSON) strings,
	// decoded later into the function config
	encodedDataBindings string
	encodedTriggers string
	encodedLabels string
	encodedRuntimeAttributes string
	projectName string
	resourceLimits stringSliceFlag
	resourceRequests stringSliceFlag
	encodedEnv stringSliceFlag
	encodedFunctionPlatformConfig string
	encodedBuildRuntimeAttributes string
	encodedBuildCodeEntryAttributes string
	inputImageFile string
	loggerLevel string
	// scaling options
	replicas int
	minReplicas int
	maxReplicas int
}
// newDeployCommandeer creates the "nuctl deploy" command. The command builds
// and deploys a function (or redeploys an already-imported one). Configuration
// is merged in order: an imported function's stored config, a function config
// file, and finally the command-line flags, which take precedence.
func newDeployCommandeer(rootCommandeer *RootCommandeer) *deployCommandeer {
	commandeer := &deployCommandeer{
		rootCommandeer: rootCommandeer,
		functionConfig: *functionconfig.NewConfig(),
	}

	cmd := &cobra.Command{
		Use:   "deploy function-name",
		Short: "Build and deploy a function, or deploy from an existing image",
		RunE: func(cmd *cobra.Command, args []string) error {
			var err error

			// initialize root
			if err := rootCommandeer.initialize(); err != nil {
				return errors.Wrap(err, "Failed to initialize root")
			}

			var importedFunction platform.Function

			// update build stuff
			if len(args) == 1 {
				commandeer.functionName = args[0]

				// if a function with this name exists in the "imported" state,
				// redeploy it from its stored configuration
				importedFunction, err = commandeer.getImportedFunction(args[0])
				if err != nil {
					return errors.Wrap(err, "Failed getting the imported function's data")
				}
				if importedFunction != nil {
					commandeer.rootCommandeer.loggerInstance.Debug("Function was already imported, deploying it")
					commandeer.functionConfig = commandeer.prepareFunctionConfigForRedeploy(importedFunction)
				}
			}

			// If config file is provided (only used when not redeploying an imported function)
			if importedFunction == nil && commandeer.functionConfigPath != "" {
				commandeer.rootCommandeer.loggerInstance.DebugWith("Loading function config from file", "file", commandeer.functionConfigPath)
				functionConfigFile, err := nuctlcommon.OpenFile(commandeer.functionConfigPath)
				if err != nil {
					return errors.Wrap(err, "Failed opening function config file")
				}

				functionBody, err := ioutil.ReadAll(functionConfigFile)
				if err != nil {
					return errors.Wrap(err, "Failed reading function config file")
				}

				// the file may be YAML or JSON; pick the right decoder from the content
				unmarshalFunc, err := nuctlcommon.GetUnmarshalFunc(functionBody)
				if err != nil {
					return errors.Wrap(err, "Failed identifying function config file format")
				}

				err = unmarshalFunc(functionBody, &commandeer.functionConfig)
				if err != nil {
					return errors.Wrap(err, "Failed parsing function config file")
				}

				commandeer.rootCommandeer.loggerInstance.DebugWith("Successfully loaded function config", "functionConfig", commandeer.functionConfig)
			}

			// Populate initial defaults in the function spec, but consider existing values
			// if the spec was brought from a file or from an already imported function.
			commandeer.populateDeploymentDefaults()

			// Override basic fields from the config
			// NOTE(review): assigning Spec.Build here replaces any build section loaded
			// from the config file with the flag-derived build config — confirm this
			// precedence is intended.
			commandeer.functionConfig.Meta.Name = commandeer.functionName
			commandeer.functionConfig.Meta.Namespace = rootCommandeer.namespace
			commandeer.functionConfig.Spec.Build = commandeer.functionBuild
			commandeer.functionConfig.Spec.Build.Commands = commandeer.commands
			commandeer.functionConfig.Spec.Build.FunctionConfigPath = commandeer.functionConfigPath

			// Enrich function config with args
			commandeer.enrichConfigWithStringArgs()
			commandeer.enrichConfigWithIntArgs()
			commandeer.enrichConfigWithBoolArgs()
			err = commandeer.enrichConfigWithComplexArgs()
			if err != nil {
				return errors.Wrap(err, "Failed config with complex args")
			}

			// Ensure the skip-annotations never exist on deploy
			commandeer.functionConfig.Meta.RemoveSkipBuildAnnotation()
			commandeer.functionConfig.Meta.RemoveSkipDeployAnnotation()

			commandeer.rootCommandeer.loggerInstance.DebugWith("Deploying function", "functionConfig", commandeer.functionConfig)
			_, deployErr := rootCommandeer.platform.CreateFunction(&platform.CreateFunctionOptions{
				Logger:         rootCommandeer.loggerInstance,
				FunctionConfig: commandeer.functionConfig,
				InputImageFile: commandeer.inputImageFile,
			})

			// don't check deploy error yet, first try to save the logs either way, and then return the error if necessary
			commandeer.rootCommandeer.loggerInstance.Debug("Saving deployment logs")
			logSaveErr := rootCommandeer.platform.SaveFunctionDeployLogs(commandeer.functionName, rootCommandeer.namespace)

			if deployErr != nil {
				// preserve the error and let the root commandeer handle unwrapping it
				return deployErr
			}
			return logSaveErr
		},
	}

	addDeployFlags(cmd, commandeer)
	cmd.Flags().StringVarP(&commandeer.inputImageFile, "input-image-file", "", "", "Path to input of docker archive")

	commandeer.cmd = cmd

	return commandeer
}
// addDeployFlags registers all deploy-specific command-line flags on the given
// command, binding each flag to the corresponding commandeer field. Build
// flags shared with "nuctl build" are registered via addBuildFlags.
func addDeployFlags(cmd *cobra.Command,
	commandeer *deployCommandeer) {
	// shared build flags (runtime, handler, commands, build attributes, ...)
	addBuildFlags(cmd, &commandeer.functionBuild, &commandeer.functionConfigPath, &commandeer.runtime, &commandeer.handler, &commandeer.commands, &commandeer.encodedBuildRuntimeAttributes, &commandeer.encodedBuildCodeEntryAttributes)

	cmd.Flags().StringVar(&commandeer.description, "desc", "", "Function description")
	cmd.Flags().StringVarP(&commandeer.encodedLabels, "labels", "l", "", "Additional function labels (lbl1=val1[,lbl2=val2,...])")
	cmd.Flags().VarP(&commandeer.encodedEnv, "env", "e", "Environment variables env1=val1")
	cmd.Flags().BoolVarP(&commandeer.disable, "disable", "d", false, "Start the function as disabled (don't run yet)")
	// scaling flags default to -1, meaning "not set" (see enrichConfigWithIntArgs)
	cmd.Flags().IntVarP(&commandeer.replicas, "replicas", "", -1, "Set to any non-negative integer to use a static number of replicas")
	cmd.Flags().IntVar(&commandeer.minReplicas, "min-replicas", -1, "Minimal number of function replicas")
	cmd.Flags().IntVar(&commandeer.maxReplicas, "max-replicas", -1, "Maximal number of function replicas")
	cmd.Flags().IntVar(&commandeer.targetCPU, "target-cpu", -1, "Target CPU when auto-scaling, in percentage")
	cmd.Flags().BoolVar(&commandeer.publish, "publish", false, "Publish the function")
	cmd.Flags().StringVar(&commandeer.encodedDataBindings, "data-bindings", "", "JSON-encoded data bindings for the function")
	cmd.Flags().StringVar(&commandeer.encodedTriggers, "triggers", "", "JSON-encoded triggers for the function")
	cmd.Flags().StringVar(&commandeer.encodedFunctionPlatformConfig, "platform-config", "", "JSON-encoded platform specific configuration")
	cmd.Flags().StringVar(&commandeer.image, "run-image", "", "Name of an existing image to deploy (default - build a new image to deploy)")
	cmd.Flags().StringVar(&commandeer.runRegistry, "run-registry", "", "URL of a registry for pulling the image, if differs from -r/--registry (env: NUCTL_RUN_REGISTRY)")
	cmd.Flags().StringVar(&commandeer.encodedRuntimeAttributes, "runtime-attrs", "", "JSON-encoded runtime attributes for the function")
	cmd.Flags().IntVar(&commandeer.readinessTimeoutSeconds, "readiness-timeout", -1, "maximum wait time for the function to be ready")
	cmd.Flags().StringVar(&commandeer.projectName, "project-name", "", "name of project to which this function belongs to")
	cmd.Flags().Var(&commandeer.volumes, "volume", "Volumes for the deployment function (src1=dest1[,src2=dest2,...])")
	cmd.Flags().Var(&commandeer.resourceLimits, "resource-limit", "Limits resources in the format of resource-name=quantity (e.g. cpu=3)")
	cmd.Flags().Var(&commandeer.resourceRequests, "resource-request", "Requests resources in the format of resource-name=quantity (e.g. cpu=3)")
	cmd.Flags().StringVar(&commandeer.loggerLevel, "logger-level", "", "One of debug, info, warn, error. By default, uses platform configuration")
}
// parseResourceAllocations parses "resource-name=quantity" flag values
// (e.g. cpu=3) into the given resource list. The destination list is
// allocated lazily, so it stays nil when no values were provided.
func parseResourceAllocations(values stringSliceFlag, resources *v1.ResourceList) error {
	for _, allocation := range values {
		// each entry must be exactly "name=quantity"
		parts := strings.Split(allocation, "=")
		if len(parts) != 2 {
			return errors.Errorf("Resource allocation %s not in the format of resource-name=quantity", allocation)
		}

		quantity, err := resource.ParseQuantity(parts[1])
		if err != nil {
			return errors.Wrap(err, "Failed to parse quantity")
		}

		// allocate the destination list only once there is something to store
		if *resources == nil {
			*resources = v1.ResourceList{}
		}

		(*resources)[v1.ResourceName(parts[0])] = quantity
	}

	return nil
}
// parseVolumes converts "volume-src:volume-destination" flag values into
// host-path volumes with matching volume mounts. Each entry must contain a
// non-empty source and destination separated by a single ":".
func parseVolumes(volumes stringSliceFlag) ([]functionconfig.Volume, error) {
	var originVolumes []functionconfig.Volume
	for volumeIndex, volume := range volumes {

		// decode volumes
		volumeSrcAndDestination := strings.Split(volume, ":")

		// must be exactly 2 (source, destination), both non-empty
		if len(volumeSrcAndDestination) != 2 || len(volumeSrcAndDestination[0]) == 0 || len(volumeSrcAndDestination[1]) == 0 {
			// report the original flag value (not the split slice, which would
			// render as "[a b c]") so the user sees exactly what they typed
			return []functionconfig.Volume{}, errors.Errorf("Volume format %s not in the format of volume-src:volume-destination", volume)
		}

		// generate simple volume name
		volumeName := fmt.Sprintf("volume-%v", volumeIndex+1)

		originVolumes = append(originVolumes,
			functionconfig.Volume{
				Volume: v1.Volume{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{
							Path: volumeSrcAndDestination[0],
						},
					},
				},
				VolumeMount: v1.VolumeMount{
					Name:      volumeName,
					MountPath: volumeSrcAndDestination[1],
				},
			},
		)
	}

	return originVolumes, nil
}
// getImportedFunction checks whether a function with the given name exists in
// the current namespace in the "imported" state. If so it returns that
// function; otherwise (not found, or found in any other state) it returns
// nil with no error.
func (d *deployCommandeer) getImportedFunction(functionName string) (platform.Function, error) {
	functions, err := d.rootCommandeer.platform.GetFunctions(&platform.GetFunctionsOptions{
		Name:      functionName,
		Namespace: d.rootCommandeer.namespace,
	})
	if err != nil {
		return nil, errors.Wrap(err, "Failed to check existing functions")
	}

	if len(functions) == 0 {
		return nil, nil
	}

	function := functions[0]
	if err := function.Initialize(nil); err != nil {

		// debug level, we don't want to spam user output when we just try to import an non-existent function
		d.rootCommandeer.loggerInstance.DebugWith("Failed to initialize function", "err", err.Error())
	}

	if function.GetStatus().State == functionconfig.FunctionStateImported {
		return function, nil
	}

	return nil, nil
}
// prepareFunctionConfigForRedeploy builds a deployable configuration from a
// previously imported function, keeping the run registry requested via the
// command line.
func (d *deployCommandeer) prepareFunctionConfigForRedeploy(importedFunction platform.Function) functionconfig.Config {
	redeployConfig := importedFunction.GetConfig()

	// strip fields that must not survive a redeploy, then restore the run
	// registry taken from the commandeer's own configuration
	redeployConfig.CleanFunctionSpec()
	redeployConfig.Spec.RunRegistry = d.functionConfig.Spec.RunRegistry

	return *redeployConfig
}
// populateDeploymentDefaults fills zero-valued spec fields with defaults.
// Fields already set (from a config file or an imported function) are left
// untouched. The run registry falls back to the NUCTL_RUN_REGISTRY
// environment variable.
func (d *deployCommandeer) populateDeploymentDefaults() {
	if d.functionConfig.Spec.TargetCPU == 0 {
		d.functionConfig.Spec.TargetCPU = abstract.DefaultTargetCPU
	}
	if d.functionConfig.Spec.RunRegistry == "" {
		d.functionConfig.Spec.RunRegistry = os.Getenv("NUCTL_RUN_REGISTRY")
	}
	if d.functionConfig.Spec.ReadinessTimeoutSeconds == 0 {
		d.functionConfig.Spec.ReadinessTimeoutSeconds = abstract.DefaultReadinessTimeoutSeconds
	}
	// initialize empty maps so later enrichment can assign into them safely
	if d.functionConfig.Spec.DataBindings == nil {
		d.functionConfig.Spec.DataBindings = map[string]functionconfig.DataBinding{}
	}
	if d.functionConfig.Spec.Triggers == nil {
		d.functionConfig.Spec.Triggers = map[string]functionconfig.Trigger{}
	}
	if d.functionConfig.Spec.RuntimeAttributes == nil {
		d.functionConfig.Spec.RuntimeAttributes = map[string]interface{}{}
	}
}
// enrichConfigWithStringArgs copies string command-line flags into the
// function spec. Empty flags are treated as "not set" and leave the existing
// spec values (e.g. from a config file) untouched.
func (d *deployCommandeer) enrichConfigWithStringArgs() {
	if d.description != "" {
		d.functionConfig.Spec.Description = d.description
	}

	if d.image != "" {
		d.functionConfig.Spec.Image = d.image
	}

	if d.runRegistry != "" {
		d.functionConfig.Spec.RunRegistry = d.runRegistry
	}

	if d.runtime != "" {
		d.functionConfig.Spec.Runtime = d.runtime
	}

	if d.handler != "" {
		d.functionConfig.Spec.Handler = d.handler
	}

	// check if logger level is set
	if d.loggerLevel != "" {
		// a single sink with the requested level replaces any configured sinks
		d.functionConfig.Spec.LoggerSinks = []functionconfig.LoggerSink{
			{Level: d.loggerLevel},
		}
	}
}
// enrichConfigWithBoolArgs applies boolean command-line flags to the function
// spec. A false flag means "not set" and leaves the spec value untouched, so
// values loaded from a config file are preserved.
func (d *deployCommandeer) enrichConfigWithBoolArgs() {
	if d.disable {
		d.functionConfig.Spec.Disable = true
	}

	if d.publish {
		d.functionConfig.Spec.Publish = true
	}
}
// enrichConfigWithIntArgs applies integer command-line flags to the function
// spec. All of these flags default to -1; any negative value means "not set".
// Note the replica fields store pointers into the commandeer struct itself.
func (d *deployCommandeer) enrichConfigWithIntArgs() {

	// any negative value counted as not set (meaning leaving commandeer.functionConfig.Spec.Replicas as nil)
	if d.replicas >= 0 {
		d.functionConfig.Spec.Replicas = &d.replicas
	}

	// any negative value counted as not set (meaning leaving commandeer.functionConfig.Spec.MinReplicas as nil)
	if d.minReplicas >= 0 {
		d.functionConfig.Spec.MinReplicas = &d.minReplicas
	}

	// any negative value counted as not set (meaning leaving commandeer.functionConfig.Spec.MaxReplicas as nil)
	if d.maxReplicas >= 0 {
		d.functionConfig.Spec.MaxReplicas = &d.maxReplicas
	}

	// any negative value counted as not set (meaning leaving commandeer.functionConfig.Spec.TargetCPU as default)
	if d.targetCPU >= 0 {
		d.functionConfig.Spec.TargetCPU = d.targetCPU
	}

	// any negative value counted as not set (meaning leaving commandeer.functionConfig.Spec.ReadinessTimeoutSeconds as default)
	if d.readinessTimeoutSeconds >= 0 {
		d.functionConfig.Spec.ReadinessTimeoutSeconds = d.readinessTimeoutSeconds
	}
}
// enrichConfigWithComplexArgs decodes the structured command-line flags
// (volumes, resource limits/requests, JSON-encoded attributes, labels and
// environment variables) into the function configuration. It returns an
// error on the first flag that fails to parse.
func (d *deployCommandeer) enrichConfigWithComplexArgs() error {
	// parse volumes
	volumes, err := parseVolumes(d.volumes)
	if err != nil {
		return errors.Wrap(err, "Failed to parse volumes")
	}
	d.functionConfig.Spec.Volumes = append(d.functionConfig.Spec.Volumes, volumes...)

	// parse resource limits
	if err := parseResourceAllocations(d.resourceLimits,
		&d.functionConfig.Spec.Resources.Limits); err != nil {
		return errors.Wrap(err, "Failed to parse resource limits")
	}

	// parse resource requests
	if err := parseResourceAllocations(d.resourceRequests,
		&d.functionConfig.Spec.Resources.Requests); err != nil {
		return errors.Wrap(err, "Failed to parse resource requests")
	}

	// decode the JSON data bindings
	if d.encodedDataBindings != "" {
		if err := json.Unmarshal([]byte(d.encodedDataBindings),
			&d.functionConfig.Spec.DataBindings); err != nil {
			return errors.Wrap(err, "Failed to decode data bindings")
		}
	}

	// decode the JSON triggers
	if d.encodedTriggers != "" {
		if err := json.Unmarshal([]byte(d.encodedTriggers),
			&d.functionConfig.Spec.Triggers); err != nil {
			return errors.Wrap(err, "Failed to decode triggers")
		}
	}

	// decode the JSON function platform configuration
	if d.encodedFunctionPlatformConfig != "" {
		if err := json.Unmarshal([]byte(d.encodedFunctionPlatformConfig),
			&d.functionConfig.Spec.Platform); err != nil {
			return errors.Wrap(err, "Failed to decode function platform configuration")
		}
	}

	// decode the JSON runtime attributes
	if d.encodedRuntimeAttributes != "" {
		if err := json.Unmarshal([]byte(d.encodedRuntimeAttributes),
			&d.functionConfig.Spec.RuntimeAttributes); err != nil {
			return errors.Wrap(err, "Failed to decode runtime attributes")
		}
	}

	// decode the JSON build runtime attributes
	if d.encodedBuildRuntimeAttributes != "" {
		if err := json.Unmarshal([]byte(d.encodedBuildRuntimeAttributes),
			&d.functionConfig.Spec.Build.RuntimeAttributes); err != nil {
			return errors.Wrap(err, "Failed to decode build runtime attributes")
		}
	}

	// decode the JSON build code entry attributes
	if d.encodedBuildCodeEntryAttributes != "" {
		if err := json.Unmarshal([]byte(d.encodedBuildCodeEntryAttributes),
			&d.functionConfig.Spec.Build.CodeEntryAttributes); err != nil {
			return errors.Wrap(err, "Failed to decode code entry attributes")
		}
	}

	// decode labels ("lbl1=val1,lbl2=val2"); merged into any existing labels
	if d.functionConfig.Meta.Labels == nil {
		d.functionConfig.Meta.Labels = map[string]string{}
	}
	for label, labelValue := range common.StringToStringMap(d.encodedLabels, "=") {
		d.functionConfig.Meta.Labels[label] = labelValue
	}

	// if the project name was set, add it as a label (not in string enrichment, because it's part of the labels)
	if d.projectName != "" {
		d.functionConfig.Meta.Labels["nuclio.io/project-name"] = d.projectName
	}

	// decode env ("name=value"; value may itself contain "=")
	for _, encodedEnvNameAndValue := range d.encodedEnv {
		envNameAndValue := strings.SplitN(encodedEnvNameAndValue, "=", 2)
		if len(envNameAndValue) != 2 {
			return errors.Errorf("Environment variable must be in the form of name=value: %s",
				encodedEnvNameAndValue)
		}

		d.functionConfig.Spec.Env = append(d.functionConfig.Spec.Env, v1.EnvVar{
			Name:  envNameAndValue[0],
			Value: envNameAndValue[1],
		})
	}

	return nil
}
| [
"\"NUCTL_RUN_REGISTRY\""
]
| []
| [
"NUCTL_RUN_REGISTRY"
]
| [] | ["NUCTL_RUN_REGISTRY"] | go | 1 | 0 | |
django_rest/django_rest/wsgi.py | """
WSGI config for django_rest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_rest.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
first/asgi.py | """
ASGI config for first project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
python/setup.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, exec-used
"""Setup TVM package."""
from __future__ import absolute_import
import os
import shutil
import sys
import sysconfig
import platform
from setuptools import find_packages
from setuptools.dist import Distribution
# need to use distutils.core for correct placement of cython dll
# (in-place builds place compiled extensions next to the sources)
if "--inplace" in sys.argv:
    from distutils.core import setup
    from distutils.extension import Extension
else:
    from setuptools import setup
    from setuptools.extension import Extension

# Directory containing this setup.py; used to locate libtvm and package data.
CURRENT_DIR = os.path.dirname(__file__)
def get_lib_path():
    """Get library path, name and version.

    Returns:
        tuple: (libs, version) where ``libs`` is a list of shared-library
        paths to bundle (or None when building under conda, which supplies
        the libraries itself) and ``version`` is the package version string.
    """
    # We can not import `libinfo.py` in setup.py directly since __init__.py
    # Will be invoked which introduces dependences
    libinfo_py = os.path.join(CURRENT_DIR, "./tvm/_ffi/libinfo.py")
    libinfo = {"__file__": libinfo_py}
    # Execute libinfo.py in an isolated namespace; use a context manager so the
    # file handle is closed promptly instead of leaking until GC.
    with open(libinfo_py, "rb") as libinfo_file:
        exec(compile(libinfo_file.read(), libinfo_py, "exec"), libinfo, libinfo)
    version = libinfo["__version__"]

    if not os.getenv("CONDA_BUILD"):
        lib_path = libinfo["find_lib_path"]()
        libs = [lib_path[0]]
        # If the primary library is the full libtvm (not the runtime-only one),
        # also bundle the runtime library when present in the search results.
        if libs[0].find("runtime") == -1:
            for name in lib_path[1:]:
                if name.find("runtime") != -1:
                    libs.append(name)
                    break
    else:
        libs = None
    return libs, version
# Resolved library files to bundle (None under conda builds) and package version.
LIB_LIST, __version__ = get_lib_path()
def config_cython():
    """Try to configure cython and return cython configuration.

    Returns:
        list: cythonized Extension modules for the FFI layer, or an empty
        list when cython is unavailable or unsupported on this platform.
    """
    # cython is not supported on MS-Windows in this build setup
    if os.name == "nt":
        print("WARNING: Cython is not supported on Windows, will compile without cython module")
        return []
    sys_cflags = sysconfig.get_config_var("CFLAGS")
    # universal (fat) binary builds on macOS are not supported
    if "i386" in sys_cflags and "x86_64" in sys_cflags:
        print("WARNING: Cython library may not be compiled correctly with both i386 and x64")
        return []
    try:
        from Cython.Build import cythonize

        # from setuptools.extension import Extension
        # pick the python-version-specific cython submodule
        if sys.version_info >= (3, 0):
            subdir = "_cy3"
        else:
            subdir = "_cy2"
        ret = []
        path = "tvm/_ffi/_cython"
        # NOTE(review): this branch is unreachable — os.name == "nt" already
        # returned above; confirm whether Windows support was intended here.
        if os.name == "nt":
            library_dirs = ["tvm", "../build/Release", "../build"]
            libraries = ["libtvm"]
        else:
            library_dirs = None
            libraries = None
        # build one Extension per .pyx source in the cython directory
        for fn in os.listdir(path):
            if not fn.endswith(".pyx"):
                continue
            ret.append(
                Extension(
                    "tvm._ffi.%s.%s" % (subdir, fn[:-4]),
                    ["tvm/_ffi/_cython/%s" % fn],
                    include_dirs=[
                        "../include/",
                        "../3rdparty/dmlc-core/include",
                        "../3rdparty/dlpack/include",
                    ],
                    extra_compile_args=["-std=c++14"],
                    library_dirs=library_dirs,
                    libraries=libraries,
                    language="c++",
                )
            )
        return cythonize(ret, compiler_directives={"language_level": 3})
    except ImportError:
        print("WARNING: Cython is not installed, will compile without cython module")
        return []
class BinaryDistribution(Distribution):
    """Distribution that always reports platform-specific (binary) content.

    Forces a platform wheel even when no extension modules are compiled,
    since the bundled libtvm shared library is platform dependent.
    """

    def has_ext_modules(self):
        return True

    def is_pure(self):
        return False
# Decide how the shared libraries are bundled:
# - wheel builds copy the libs into the package and list them in MANIFEST.in
# - other (non-conda) builds reference them via data_files with relative paths
# - conda builds bundle nothing (the conda package provides the libraries)
include_libs = False
wheel_include_libs = False
if not os.getenv("CONDA_BUILD"):
    if "bdist_wheel" in sys.argv:
        wheel_include_libs = True
    else:
        include_libs = True

setup_kwargs = {}

# For bdist_wheel only
if wheel_include_libs:
    with open("MANIFEST.in", "w") as fo:
        for path in LIB_LIST:
            # copy each library next to the python package so it ships in the wheel
            shutil.copy(path, os.path.join(CURRENT_DIR, "tvm"))
            _, libname = os.path.split(path)
            fo.write("include tvm/%s\n" % libname)
    setup_kwargs = {"include_package_data": True}

if include_libs:
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # data_files entries must be relative to setup.py
    for i, path in enumerate(LIB_LIST):
        LIB_LIST[i] = os.path.relpath(path, curr_path)
    setup_kwargs = {"include_package_data": True, "data_files": [("tvm", LIB_LIST)]}
def get_package_data_files():
    """Return the Relay standard-library files shipped as package data."""
    return [
        "relay/std/prelude.rly",
        "relay/std/core.rly",
    ]
# Main packaging entry point; setup_kwargs carries the library-bundling
# strategy decided above.
setup(
    name="tvm",
    version=__version__,
    description="TVM: An End to End Tensor IR/DSL Stack for Deep Learning Systems",
    zip_safe=False,
    entry_points={"console_scripts": ["tvmc = tvm.driver.tvmc.main:main"]},
    install_requires=[
        "numpy",
        "scipy",
        "decorator",
        "attrs",
        "psutil",
        "typed_ast",
    ],
    extras_require={
        "test": ["pillow<7", "matplotlib"],
        "extra_feature": [
            "tornado",
            "psutil",
            "xgboost>=1.1.0",
            "mypy",
            "orderedset",
        ],
        # dependencies for the tvmc command-line driver's frontends
        "tvmc": [
            "tensorflow>=2.1.0",
            "tflite>=2.1.0",
            "onnx>=1.7.0",
            "onnxruntime>=1.0.0",
            "torch>=1.4.0",
            "torchvision>=0.5.0",
        ],
    },
    packages=find_packages(),
    package_dir={"tvm": "tvm"},
    package_data={"tvm": get_package_data_files()},
    distclass=BinaryDistribution,
    url="https://github.com/apache/incubator-tvm",
    ext_modules=config_cython(),
    **setup_kwargs,
)

if wheel_include_libs:
    # Wheel cleanup: remove the MANIFEST.in and the library copies made above.
    os.remove("MANIFEST.in")
    for path in LIB_LIST:
        _, libname = os.path.split(path)
        os.remove("tvm/%s" % libname)
| []
| []
| [
"CONDA_BUILD"
]
| [] | ["CONDA_BUILD"] | python | 1 | 0 | |
qiskit/aqua/utils/run_circuits.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" run circuits functions """
import sys
import logging
import time
import copy
import os
import uuid
import numpy as np
from qiskit.providers import BaseBackend, JobStatus, JobError
from qiskit.providers.jobstatus import JOB_FINAL_STATES
from qiskit.providers.basicaer import BasicAerJob
from qiskit.qobj import QasmQobj
from qiskit.exceptions import QiskitError
from qiskit.aqua.aqua_error import AquaError
from qiskit.aqua.utils.backend_utils import (is_aer_provider,
is_basicaer_provider,
is_simulator_backend,
is_local_backend,
is_ibmq_provider)
# Optional operator-tuned limits (both unset by default): cap the number of
# circuits, or the total number of gates, packed into a single submitted job.
MAX_CIRCUITS_PER_JOB = os.environ.get('QISKIT_AQUA_MAX_CIRCUITS_PER_JOB', None)
MAX_GATES_PER_JOB = os.environ.get('QISKIT_AQUA_MAX_GATES_PER_JOB', None)

logger = logging.getLogger(__name__)
def find_regs_by_name(circuit, name, qreg=True):
    """Find a register of the circuit by name.

    Args:
        circuit (QuantumCircuit): the quantum circuit.
        name (str): name of register
        qreg (bool): search quantum registers if True, classical otherwise

    Returns:
        QuantumRegister or ClassicalRegister or None: the first register with
        the given name, or None when no register matches.
    """
    registers = circuit.qregs if qreg else circuit.cregs
    for register in registers:
        if register.name == name:
            return register
    return None
def _combine_result_objects(results):
"""Temporary helper function.
TODO:
This function would be removed after Terra supports job with infinite circuits.
"""
if len(results) == 1:
return results[0]
new_result = copy.deepcopy(results[0])
for idx in range(1, len(results)):
new_result.results.extend(results[idx].results)
return new_result
def _split_qobj_to_qobjs(qobj, chunk_size):
    """Split a qobj into chunks of at most ``chunk_size`` experiments.

    Each chunk may be split further by :func:`_maybe_split_qobj_by_gates`
    when a total-gate limit is configured.

    Args:
        qobj (QasmQobj): the qobj to split.
        chunk_size (int): maximum number of experiments per resulting qobj.

    Returns:
        list: the split qobjs (the original qobj when it already fits).

    Raises:
        AquaError: if splitting is required and the qobj is not a QasmQobj.
    """
    qobjs = []
    num_chunks = int(np.ceil(len(qobj.experiments) / chunk_size))
    if num_chunks == 1:
        qobjs = [qobj]
    else:
        if isinstance(qobj, QasmQobj):
            # the template shares config/header; experiments are filled per chunk
            qobj_template = QasmQobj(qobj_id=qobj.qobj_id,
                                     config=qobj.config, experiments=[], header=qobj.header)
            for i in range(num_chunks):
                temp_qobj = copy.deepcopy(qobj_template)
                # each chunk gets a fresh id so the backend treats it as a new job
                temp_qobj.qobj_id = str(uuid.uuid4())
                temp_qobj.experiments = qobj.experiments[i * chunk_size:(i + 1) * chunk_size]
                qobjs = _maybe_split_qobj_by_gates(qobjs, temp_qobj)
        else:
            raise AquaError("Only support QasmQobj now.")

    return qobjs
def _maybe_split_qobj_by_gates(qobjs, qobj):
    """Append ``qobj`` to ``qobjs``, splitting it first when it exceeds the
    MAX_GATES_PER_JOB gate budget.

    Experiments are packed greedily: each chunk accumulates experiments until
    adding the next one would exceed the limit. A single experiment larger
    than the limit still becomes its own chunk.

    Args:
        qobjs (list): accumulator list of qobjs; mutated and returned.
        qobj (QasmQobj): candidate qobj to append.

    Returns:
        list: the accumulator with the (possibly split) qobj appended.
    """
    if MAX_GATES_PER_JOB is not None:
        max_gates_per_job = int(MAX_GATES_PER_JOB)
        total_num_gates = 0
        for j in range(len(qobj.experiments)):
            total_num_gates += len(qobj.experiments[j].instructions)
        # split by gates if total number of gates in a qobj exceed MAX_GATES_PER_JOB
        if total_num_gates > max_gates_per_job:
            qobj_template = QasmQobj(qobj_id=qobj.qobj_id,
                                     config=qobj.config, experiments=[], header=qobj.header)
            temp_qobj = copy.deepcopy(qobj_template)
            temp_qobj.qobj_id = str(uuid.uuid4())
            temp_qobj.experiments = []
            num_gates = 0
            for i in range(len(qobj.experiments)):
                num_gates += len(qobj.experiments[i].instructions)
                if num_gates <= max_gates_per_job:
                    temp_qobj.experiments.append(qobj.experiments[i])
                else:
                    # flush the chunk accumulated so far; guard against emitting
                    # an empty chunk when the very first experiment alone
                    # exceeds the gate limit
                    if temp_qobj.experiments:
                        qobjs.append(temp_qobj)
                    # Initialize for next temp_qobj
                    temp_qobj = copy.deepcopy(qobj_template)
                    temp_qobj.qobj_id = str(uuid.uuid4())
                    temp_qobj.experiments.append(qobj.experiments[i])
                    num_gates = len(qobj.experiments[i].instructions)

            qobjs.append(temp_qobj)
        else:
            qobjs.append(qobj)
    else:
        qobjs.append(qobj)

    return qobjs
def _safe_submit_qobj(qobj, backend, backend_options, noise_config, skip_qobj_validation):
    """Submit a qobj, retrying forever until a job id is obtained.

    Handles IBMQ job-limit errors by waiting for the oldest running job to
    finish before resubmitting.

    Returns:
        tuple: (job, job_id) of the successfully submitted job.
    """
    # assure get job ids
    while True:
        try:
            job = run_on_backend(backend, qobj, backend_options=backend_options,
                                 noise_config=noise_config,
                                 skip_qobj_validation=skip_qobj_validation)
            job_id = job.job_id()
            break
        except QiskitError as ex:
            logger.warning("FAILURE: Can not get job id, Resubmit the qobj to get job id. "
                           "Terra job error: %s ", ex)
            # IBMQ concurrent-job limit reached: wait for a slot to free up
            if is_ibmq_provider(backend) and 'Error code: 3458' in str(ex):
                # TODO Use IBMQBackendJobLimitError when new IBM Q provider is released.
                oldest_running = backend.jobs(limit=1, descending=False,
                                              status=['QUEUED', 'VALIDATING', 'RUNNING'])
                if oldest_running:
                    oldest_running = oldest_running[0]
                    logger.warning("Job limit reached, waiting for job %s to finish "
                                   "before submitting the next one.", oldest_running.job_id())
                    oldest_running.wait_for_final_state(timeout=300)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("FAILURE: Can not get job id, Resubmit the qobj to get job id."
                           "Error: %s ", ex)

    return job, job_id
def _safe_get_job_status(job, job_id):
while True:
try:
job_status = job.status()
break
except JobError as ex:
logger.warning("FAILURE: job id: %s, "
"status: 'FAIL_TO_GET_STATUS' "
"Terra job error: %s", job_id, ex)
time.sleep(5)
except Exception as ex: # pylint: disable=broad-except
raise AquaError("FAILURE: job id: {}, "
"status: 'FAIL_TO_GET_STATUS' "
"Unknown error: ({})".format(job_id, ex)) from ex
return job_status
def run_qobj(qobj, backend, qjob_config=None, backend_options=None,
             noise_config=None, skip_qobj_validation=False, job_callback=None):
    """
    An execution wrapper with Qiskit-Terra, with job auto recover capability.
    The auto-recovery feature is only applied for non-simulator backend.
    This wrapper will try to get the result no matter how long it takes.
    Args:
        qobj (QasmQobj): qobj to execute
        backend (BaseBackend): backend instance
        qjob_config (dict, optional): configuration for quantum job object
        backend_options (dict, optional): configuration for simulator
        noise_config (dict, optional): configuration for noise model
        skip_qobj_validation (bool, optional): Bypass Qobj validation to decrease submission time,
                                               only works for Aer and BasicAer providers
        job_callback (Callable, optional): callback used in querying info of the submitted job, and
                                           providing the following arguments:
                                            job_id, job_status, queue_position, job
    Returns:
        Result: Result object
    Raises:
        ValueError: invalid backend
        AquaError: Any error except for JobError raised by Qiskit Terra
    """
    qjob_config = qjob_config or {}
    backend_options = backend_options or {}
    noise_config = noise_config or {}

    if backend is None or not isinstance(backend, BaseBackend):
        raise ValueError('Backend is missing or not an instance of BaseBackend')

    # auto-recovery (resubmission on failure) only applies to real hardware
    with_autorecover = not is_simulator_backend(backend)

    # determine the maximum number of circuits packed into one job
    if MAX_CIRCUITS_PER_JOB is not None:
        max_circuits_per_job = int(MAX_CIRCUITS_PER_JOB)
    else:
        if is_local_backend(backend):
            max_circuits_per_job = sys.maxsize
        else:
            max_circuits_per_job = backend.configuration().max_experiments

    # split qobj if it exceeds the payload of the backend
    qobjs = _split_qobj_to_qobjs(qobj, max_circuits_per_job)
    jobs = []
    job_ids = []
    for qob in qobjs:
        job, job_id = _safe_submit_qobj(qob, backend,
                                        backend_options, noise_config, skip_qobj_validation)
        job_ids.append(job_id)
        jobs.append(job)

    results = []
    if with_autorecover:
        logger.info("Backend status: %s", backend.status())
        logger.info("There are %s jobs are submitted.", len(jobs))
        logger.info("All job ids:\n%s", job_ids)
        for idx, _ in enumerate(jobs):
            job = jobs[idx]
            job_id = job_ids[idx]
            # outer loop: keep resubmitting this chunk until a result is obtained
            while True:
                logger.info("Running %s-th qobj, job id: %s", idx, job_id)

                # try to get result if possible
                # inner loop: poll until the job reaches a final state
                while True:
                    job_status = _safe_get_job_status(job, job_id)
                    queue_position = 0
                    if job_status in JOB_FINAL_STATES:
                        # do callback again after the job is in the final states
                        if job_callback is not None:
                            job_callback(job_id, job_status, queue_position, job)
                        break

                    if job_status == JobStatus.QUEUED:
                        queue_position = job.queue_position()
                        logger.info("Job id: %s is queued at position %s", job_id, queue_position)
                    else:
                        logger.info("Job id: %s, status: %s", job_id, job_status)

                    if job_callback is not None:
                        job_callback(job_id, job_status, queue_position, job)
                    # qjob_config['wait'] controls the polling interval
                    time.sleep(qjob_config['wait'])

                # get result after the status is DONE
                if job_status == JobStatus.DONE:
                    # retry fetching the result until it reports success
                    while True:
                        result = job.result(**qjob_config)
                        if result.success:
                            results.append(result)
                            logger.info("COMPLETED the %s-th qobj, job id: %s", idx, job_id)
                            break

                        logger.warning("FAILURE: Job id: %s", job_id)
                        logger.warning("Job (%s) is completed anyway, retrieve result "
                                       "from backend again.", job_id)
                        job = backend.retrieve_job(job_id)
                    break

                # for other cases, resubmit the qobj until the result is available.
                # since if there is no result returned, there is no way algorithm can do any process
                # get back the qobj first to avoid for job is consumed
                qobj = job.qobj()
                if job_status == JobStatus.CANCELLED:
                    logger.warning("FAILURE: Job id: %s is cancelled. Re-submit the Qobj.",
                                   job_id)
                elif job_status == JobStatus.ERROR:
                    logger.warning("FAILURE: Job id: %s encounters the error. "
                                   "Error is : %s. Re-submit the Qobj.",
                                   job_id, job.error_message())
                else:
                    logging.warning("FAILURE: Job id: %s. Unknown status: %s. "
                                    "Re-submit the Qobj.", job_id, job_status)

                job, job_id = _safe_submit_qobj(qobj, backend,
                                                backend_options,
                                                noise_config, skip_qobj_validation)
                jobs[idx] = job
                job_ids[idx] = job_id
    else:
        # simulators: no auto-recovery, just collect results sequentially
        results = []
        for job in jobs:
            results.append(job.result(**qjob_config))

    # NOTE(review): result is None when results is empty, in which case the
    # result.success access below would raise AttributeError — verify whether
    # an empty results list is actually reachable here.
    result = _combine_result_objects(results) if results else None

    # If result was not successful then raise an exception with either the status msg or
    # extra information if this was an Aer partial result return
    if not result.success:
        msg = result.status
        if result.status == 'PARTIAL COMPLETED':
            # Aer can return partial results which Aqua algorithms cannot process and signals
            # using partial completed status where each returned result has a success and status.
            # We use the status from the first result that was not successful
            for res in result.results:
                if not res.success:
                    msg += ', ' + res.status
                    break
        raise AquaError('Circuit execution failed: {}'.format(msg))

    return result
# skip_qobj_validation = True does what backend.run
# and aerjob.submit do, but without qobj validation.
def run_on_backend(backend, qobj, backend_options=None,
                   noise_config=None, skip_qobj_validation=False):
    """Submit a qobj to the backend and return the resulting job.

    When ``skip_qobj_validation`` is True and the backend is an Aer or
    BasicAer backend, the job is constructed and submitted directly
    (bypassing qobj validation) to reduce submission time; otherwise the
    standard ``backend.run`` path is used.
    """
    if skip_qobj_validation:
        # mimic the provider's own job construction, minus validation
        job_id = str(uuid.uuid4())
        if is_aer_provider(backend):
            # pylint: disable=import-outside-toplevel
            from qiskit.providers.aer.aerjob import AerJob
            temp_backend_options = \
                backend_options['backend_options'] if backend_options != {} else None
            temp_noise_config = noise_config['noise_model'] if noise_config != {} else None
            job = AerJob(backend, job_id,
                         backend._run_job, qobj, temp_backend_options, temp_noise_config, False)
            job._future = job._executor.submit(job._fn, job._job_id, job._qobj, *job._args)
        elif is_basicaer_provider(backend):
            backend._set_options(qobj_config=qobj.config, **backend_options)
            job = BasicAerJob(backend, job_id, backend._run_job, qobj)
            job._future = job._executor.submit(job._fn, job._job_id, job._qobj)
        else:
            # unknown provider: fall back to the validated submission path
            logger.info(
                "Can't skip qobj validation for the %s provider.",
                backend.provider().__class__.__name__)
            job = backend.run(qobj, **backend_options, **noise_config)
        return job
    else:
        job = backend.run(qobj, **backend_options, **noise_config)
        return job
| []
| []
| [
"QISKIT_AQUA_MAX_GATES_PER_JOB",
"QISKIT_AQUA_MAX_CIRCUITS_PER_JOB"
]
| [] | ["QISKIT_AQUA_MAX_GATES_PER_JOB", "QISKIT_AQUA_MAX_CIRCUITS_PER_JOB"] | python | 2 | 0 | |
terraform/providers/ibm/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/service/functions/resource_ibm_function_package.go | // Copyright IBM Corp. 2017, 2021 All Rights Reserved.
// Licensed under the Mozilla Public License v2.0
package functions
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/apache/openwhisk-client-go/whisk"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
funcPkgNamespace = "namespace"
funcPkgName = "name"
funcPkgUsrDefAnnots = "user_defined_annotations"
funcPkgUsrDefParams = "user_defined_parameters"
funcPkgBindPkgName = "bind_package_name"
)
// ResourceIBMFunctionPackage returns the Terraform schema for the
// ibm_function_package resource, wiring up the CRUD/exists callbacks,
// the importer, and per-attribute validation/diff-suppression behaviour.
func ResourceIBMFunctionPackage() *schema.Resource {
	return &schema.Resource{
		Create:   resourceIBMFunctionPackageCreate,
		Read:     resourceIBMFunctionPackageRead,
		Update:   resourceIBMFunctionPackageUpdate,
		Delete:   resourceIBMFunctionPackageDelete,
		Exists:   resourceIBMFunctionPackageExists,
		Importer: &schema.ResourceImporter{},
		Schema: map[string]*schema.Schema{
			funcPkgNamespace: {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				Description:  "IBM Cloud function namespace.",
				ValidateFunc: validate.InvokeValidator("ibm_function_package", funcPkgNamespace),
			},
			funcPkgName: {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				Description:  "Name of package.",
				ValidateFunc: validate.InvokeValidator("ibm_function_package", funcPkgName),
			},
			"publish": {
				Type:        schema.TypeBool,
				Optional:    true,
				Default:     false,
				Description: "Package visibilty.",
			},
			"version": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "Semantic version of the item.",
			},
			funcPkgUsrDefAnnots: {
				Type:             schema.TypeString,
				Optional:         true,
				Description:      "Annotation values in KEY VALUE format.",
				Default:          "[]",
				ValidateFunc:     validate.InvokeValidator("ibm_function_package", funcPkgUsrDefAnnots),
				DiffSuppressFunc: flex.SuppressEquivalentJSON,
				// Store a normalized JSON form so semantically-equal strings
				// compare equal in state.
				StateFunc: func(v interface{}) string {
					json, _ := flex.NormalizeJSONString(v)
					return json
				},
			},
			funcPkgUsrDefParams: {
				Type:             schema.TypeString,
				Optional:         true,
				Description:      "Parameters values in KEY VALUE format. Parameter bindings included in the context passed to the package.",
				ValidateFunc:     validate.InvokeValidator("ibm_function_package", funcPkgUsrDefParams),
				Default:          "[]",
				DiffSuppressFunc: flex.SuppressEquivalentJSON,
				// Store a normalized JSON form so semantically-equal strings
				// compare equal in state.
				StateFunc: func(v interface{}) string {
					json, _ := flex.NormalizeJSONString(v)
					return json
				},
			},
			"annotations": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "All annotations set on package by user and those set by the IBM Cloud Function backend/API.",
			},
			"parameters": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "All parameters set on package by user and those set by the IBM Cloud Function backend/API.",
			},
			funcPkgBindPkgName: {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				Description:  "Name of package to be binded.",
				ValidateFunc: validate.InvokeValidator("ibm_function_package", funcPkgBindPkgName),
				// Suppress diffs once a binding is recorded: only a real
				// change of the bound name (new vs. old) should force a diff.
				DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
					if o == "" {
						return false
					}
					if strings.Compare(n, o) == 0 {
						return true
					}
					return false
				},
			},
			"package_id": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// ResourceIBMFuncPackageValidator declares the validation rules applied to
// the attributes of the ibm_function_package resource.
func ResourceIBMFuncPackageValidator() *validate.ResourceValidator {
	// Declare every attribute rule up front instead of appending one by one.
	rules := []validate.ValidateSchema{
		{
			Identifier:                 funcPkgName,
			ValidateFunctionIdentifier: validate.ValidateRegexp,
			Type:                       validate.TypeString,
			Regexp:                     `\A([\w]|[\w][\w@ .-]*[\[email protected]]+)\z`,
			Required:                   true,
		},
		{
			Identifier:                 funcPkgNamespace,
			ValidateFunctionIdentifier: validate.ValidateNoZeroValues,
			Type:                       validate.TypeString,
			Required:                   true,
		},
		{
			Identifier:                 funcPkgUsrDefAnnots,
			ValidateFunctionIdentifier: validate.ValidateJSONString,
			Type:                       validate.TypeString,
			Default:                    "[]",
			Optional:                   true,
		},
		{
			Identifier:                 funcPkgBindPkgName,
			ValidateFunctionIdentifier: validate.ValidateBindedPackageName,
			Type:                       validate.TypeString,
			Optional:                   true,
		},
	}

	validator := validate.ResourceValidator{ResourceName: "ibm_function_package", Schema: rules}
	return &validator
}
// resourceIBMFunctionPackageCreate creates an (optionally bound) package in
// the configured IBM Cloud Functions namespace and seeds state via Read.
func resourceIBMFunctionPackageCreate(d *schema.ResourceData, meta interface{}) error {
	functionNamespaceAPI, err := meta.(conns.ClientSession).FunctionIAMNamespaceAPI()
	if err != nil {
		return err
	}
	bxSession, err := meta.(conns.ClientSession).BluemixSession()
	if err != nil {
		return err
	}
	namespace := d.Get("namespace").(string)
	wskClient, err := conns.SetupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
	if err != nil {
		return err
	}
	packageService := wskClient.Packages
	name := d.Get("name").(string)
	// Split the user-supplied name into namespace + entity parts.
	var qualifiedName = new(QualifiedName)
	if qualifiedName, err = NewQualifiedName(name); err != nil {
		return NewQualifiedNameError(name, err)
	}
	payload := whisk.Package{
		Name:      qualifiedName.GetEntityName(),
		Namespace: qualifiedName.GetNamespace(),
	}
	userDefinedAnnotations := d.Get("user_defined_annotations").(string)
	payload.Annotations, err = flex.ExpandAnnotations(userDefinedAnnotations)
	if err != nil {
		return err
	}
	userDefinedParameters := d.Get("user_defined_parameters").(string)
	payload.Parameters, err = flex.ExpandParameters(userDefinedParameters)
	if err != nil {
		return err
	}
	if publish, ok := d.GetOk("publish"); ok {
		p := publish.(bool)
		payload.Publish = &p
	}
	// If bind_package_name is set, create this package as a binding of the
	// referenced source package.
	if v, ok := d.GetOk("bind_package_name"); ok {
		var BindingQualifiedName = new(QualifiedName)
		if BindingQualifiedName, err = NewQualifiedName(v.(string)); err != nil {
			return NewQualifiedNameError(v.(string), err)
		}
		BindingPayload := whisk.Binding{
			Name:      BindingQualifiedName.GetEntityName(),
			Namespace: BindingQualifiedName.GetNamespace(),
		}
		payload.Binding = &BindingPayload
	}
	log.Println("[INFO] Creating IBM CLoud Function package")
	// overwrite=false: fail rather than clobber an existing package.
	result, _, err := packageService.Insert(&payload, false)
	if err != nil {
		return fmt.Errorf("[ERROR] Error creating IBM CLoud Function package: %s", err)
	}
	// Composite ID "<namespace>:<name>" is what Read/Update/Delete parse out.
	d.SetId(fmt.Sprintf("%s:%s", namespace, result.Name))
	return resourceIBMFunctionPackageRead(d, meta)
}
// resourceIBMFunctionPackageRead refreshes Terraform state for a package.
// The resource ID is either "<namespace>:<packageID>" or, for legacy IDs,
// just "<packageID>" with the namespace taken from $FUNCTION_NAMESPACE.
func resourceIBMFunctionPackageRead(d *schema.ResourceData, meta interface{}) error {
	parts, err := flex.CfIdParts(d.Id())
	if err != nil {
		return err
	}
	namespace := ""
	packageID := ""
	if len(parts) == 2 {
		namespace = parts[0]
		packageID = parts[1]
	} else {
		// Legacy single-part ID: fall back to the namespace from the
		// environment and normalize the stored ID to the two-part form.
		namespace = os.Getenv("FUNCTION_NAMESPACE")
		packageID = parts[0]
		d.SetId(fmt.Sprintf("%s:%s", namespace, packageID))
	}

	functionNamespaceAPI, err := meta.(conns.ClientSession).FunctionIAMNamespaceAPI()
	if err != nil {
		return err
	}
	bxSession, err := meta.(conns.ClientSession).BluemixSession()
	if err != nil {
		return err
	}
	wskClient, err := conns.SetupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
	if err != nil {
		return err
	}

	packageService := wskClient.Packages
	pkg, _, err := packageService.Get(packageID)
	if err != nil {
		return fmt.Errorf("[ERROR] Error retrieving IBM Cloud Function package %s : %s", packageID, err)
	}

	d.Set("package_id", pkg.Name)
	d.Set("name", pkg.Name)
	d.Set("namespace", namespace)
	d.Set("publish", pkg.Publish)
	d.Set("version", pkg.Version)

	annotations, err := flex.FlattenAnnotations(pkg.Annotations)
	if err != nil {
		return err
	}
	d.Set("annotations", annotations)

	parameters, err := flex.FlattenParameters(pkg.Parameters)
	if err != nil {
		return err
	}
	d.Set("parameters", parameters)

	// Guard against a nil Binding before dereferencing: an unbound package
	// may come back with no binding information at all, which previously
	// panicked on *pkg.Binding.
	if pkg.Binding == nil || flex.IsEmpty(*pkg.Binding) {
		// Unbound package: everything on it was set by the user.
		d.Set("user_defined_annotations", annotations)
		d.Set("user_defined_parameters", parameters)
	} else {
		d.Set("bind_package_name", fmt.Sprintf("/%s/%s", pkg.Binding.Namespace, pkg.Binding.Name))

		// The bound (source) package may live in a different namespace, so
		// build a second client scoped to that namespace to fetch it.
		c, err := whisk.NewClient(http.DefaultClient, &whisk.Config{
			Namespace:         pkg.Binding.Namespace,
			AuthToken:         wskClient.AuthToken,
			Host:              wskClient.Host,
			AdditionalHeaders: wskClient.AdditionalHeaders,
		})
		if err != nil {
			return err
		}
		bindedPkg, _, err := c.Packages.Get(pkg.Binding.Name)
		if err != nil {
			return fmt.Errorf("[ERROR] Error retrieving Binded IBM Cloud Function package %s : %s", pkg.Binding.Name, err)
		}

		// Strip annotations/parameters inherited from the bound package so
		// only the user-supplied values land in the user_defined_* fields.
		userAnnotations, err := flex.FlattenAnnotations(flex.FilterInheritedAnnotations(bindedPkg.Annotations, pkg.Annotations))
		if err != nil {
			return err
		}
		d.Set("user_defined_annotations", userAnnotations)

		userParameters, err := flex.FlattenParameters(flex.FilterInheritedParameters(bindedPkg.Parameters, pkg.Parameters))
		if err != nil {
			return err
		}
		d.Set("user_defined_parameters", userParameters)
	}

	return nil
}
// resourceIBMFunctionPackageUpdate pushes changed publish/parameter/
// annotation values to the service (overwrite insert) and re-reads state.
func resourceIBMFunctionPackageUpdate(d *schema.ResourceData, meta interface{}) error {
	parts, err := flex.CfIdParts(d.Id())
	if err != nil {
		return err
	}
	namespace := parts[0]
	functionNamespaceAPI, err := meta.(conns.ClientSession).FunctionIAMNamespaceAPI()
	if err != nil {
		return err
	}
	bxSession, err := meta.(conns.ClientSession).BluemixSession()
	if err != nil {
		return err
	}
	wskClient, err := conns.SetupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
	if err != nil {
		return err
	}
	packageService := wskClient.Packages
	var qualifiedName = new(QualifiedName)
	if qualifiedName, err = NewQualifiedName(d.Get("name").(string)); err != nil {
		return NewQualifiedNameError(d.Get("name").(string), err)
	}
	payload := whisk.Package{
		Name:      qualifiedName.GetEntityName(),
		Namespace: qualifiedName.GetNamespace(),
	}
	// Only issue an API call when at least one updatable field changed.
	ischanged := false
	if d.HasChange("publish") {
		p := d.Get("publish").(bool)
		payload.Publish = &p
		ischanged = true
	}
	if d.HasChange("user_defined_parameters") {
		var err error
		payload.Parameters, err = flex.ExpandParameters(d.Get("user_defined_parameters").(string))
		if err != nil {
			return err
		}
		ischanged = true
	}
	if d.HasChange("user_defined_annotations") {
		var err error
		payload.Annotations, err = flex.ExpandAnnotations(d.Get("user_defined_annotations").(string))
		if err != nil {
			return err
		}
		ischanged = true
	}
	if ischanged {
		log.Println("[INFO] Update IBM Cloud Function Package")
		// overwrite=true: replace the existing package definition.
		_, _, err = packageService.Insert(&payload, true)
		if err != nil {
			return fmt.Errorf("[ERROR] Error updating IBM Cloud Function Package: %s", err)
		}
	}
	return resourceIBMFunctionPackageRead(d, meta)
}
// resourceIBMFunctionPackageDelete removes the package identified by the
// "<namespace>:<packageID>" resource ID and clears the resource from state.
func resourceIBMFunctionPackageDelete(d *schema.ResourceData, meta interface{}) error {
	idParts, err := flex.CfIdParts(d.Id())
	if err != nil {
		return err
	}
	ns, pkgID := idParts[0], idParts[1]

	nsAPI, err := meta.(conns.ClientSession).FunctionIAMNamespaceAPI()
	if err != nil {
		return err
	}
	session, err := meta.(conns.ClientSession).BluemixSession()
	if err != nil {
		return err
	}
	client, err := conns.SetupOpenWhiskClientConfig(ns, session, nsAPI)
	if err != nil {
		return err
	}

	if _, err = client.Packages.Delete(pkgID); err != nil {
		return fmt.Errorf("[ERROR] Error deleting IBM Cloud Function Package: %s", err)
	}

	// Signal to Terraform that the resource no longer exists.
	d.SetId("")
	return nil
}
// resourceIBMFunctionPackageExists reports whether the package behind the
// resource ID still exists. A 404 means "gone" (not an error); any other
// failure is surfaced to Terraform.
func resourceIBMFunctionPackageExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	parts, err := flex.CfIdParts(d.Id())
	if err != nil {
		return false, err
	}
	namespace := ""
	packageID := ""
	if len(parts) == 2 {
		namespace = parts[0]
		packageID = parts[1]
	} else {
		// Legacy single-part ID: take the namespace from the environment and
		// normalize the stored ID to the two-part form.
		namespace = os.Getenv("FUNCTION_NAMESPACE")
		packageID = parts[0]
		d.SetId(fmt.Sprintf("%s:%s", namespace, packageID))
	}

	functionNamespaceAPI, err := meta.(conns.ClientSession).FunctionIAMNamespaceAPI()
	if err != nil {
		return false, err
	}
	bxSession, err := meta.(conns.ClientSession).BluemixSession()
	if err != nil {
		return false, err
	}
	wskClient, err := conns.SetupOpenWhiskClientConfig(namespace, bxSession, functionNamespaceAPI)
	if err != nil {
		return false, err
	}

	packageService := wskClient.Packages
	pkg, resp, err := packageService.Get(packageID)
	if err != nil {
		// resp may be nil on transport-level failures (DNS, timeout, ...);
		// dereferencing it unconditionally would panic. Only a genuine HTTP
		// 404 means the package no longer exists.
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return false, nil
		}
		return false, fmt.Errorf("[ERROR] Error communicating with IBM Cloud Function Client : %s", err)
	}
	return pkg.Name == packageID, nil
}
| [
"\"FUNCTION_NAMESPACE\"",
"\"FUNCTION_NAMESPACE\""
]
| []
| [
"FUNCTION_NAMESPACE"
]
| [] | ["FUNCTION_NAMESPACE"] | go | 1 | 0 | |
x-pack/packetbeat/magefile.go | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
//go:build mage
// +build mage
package main
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
devtools "github.com/elastic/beats/v7/dev-tools/mage"
"github.com/elastic/beats/v7/dev-tools/mage/target/build"
packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage"
//mage:import
"github.com/elastic/beats/v7/dev-tools/mage/target/common"
//mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/compose"
//mage:import
_ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest"
//mage:import
"github.com/elastic/beats/v7/dev-tools/mage/target/test"
)
// NpcapVersion specifies the version of the OEM Npcap installer to bundle with
// the packetbeat executable. It is used to specify which npcap builder crossbuild
// image to use and the installer to obtain from the cloud store for testing.
const (
NpcapVersion = "1.60"
installer = "npcap-" + NpcapVersion + "-oem.exe"
)
// init wires the mage check/test targets to their prerequisites and sets the
// beat-wide description and license consumed by the packaging tooling.
func init() {
	common.RegisterCheckDeps(Update)
	test.RegisterDeps(SystemTest)
	devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch."
	devtools.BeatLicense = "Elastic License"
}
// Update updates the generated files.
func Update() {
	// Fields must be regenerated before dashboards and config, hence Serial.
	mg.SerialDeps(packetbeat.FieldsYML, Dashboards, Config)
}
// Config generates the config files.
func Config() error {
	return devtools.Config(devtools.AllConfigTypes, packetbeat.ConfigFileParams(), ".")
}
// Dashboards packages kibana dashboards
func Dashboards() error {
	// Dashboards ship alongside the OSS packetbeat protocol definitions.
	return devtools.KibanaDashboards(devtools.OSSBeatDir("protos"))
}
// Build builds the Beat binary.
func Build() error {
	return devtools.Build(devtools.DefaultBuildArgs())
}
// GolangCrossBuild build the Beat binary inside of the golang-builder.
// Do not use directly, use crossBuild instead.
func GolangCrossBuild() error {
	// Windows builds bundle the OEM Npcap installer; the npcap crossbuild
	// image exposes it under /installer (see CrossBuild's image selection).
	if devtools.Platform.GOOS == "windows" && (devtools.Platform.GOARCH == "amd64" || devtools.Platform.GOARCH == "386") {
		err := sh.Copy("./npcap/installer/"+installer, "/installer/"+installer)
		if err != nil {
			return fmt.Errorf("failed to copy Npcap installer into source tree: %w", err)
		}
	}
	return packetbeat.GolangCrossBuild()
}
// CrossBuild cross-builds the beat for all target platforms.
//
// On Windows platforms, if CrossBuild is invoked with the environment variables
// CI or NPCAP_LOCAL set to "true", a private cross-build image is selected that
// provides the OEM Npcap installer for the build. This behaviour requires access
// to the private image.
func CrossBuild() error {
	return devtools.CrossBuild(
		// Run all builds serially to try to address failures that might be caused
		// by concurrent builds. See https://github.com/elastic/beats/issues/24304.
		devtools.Serially(),
		devtools.ImageSelector(func(platform string) (string, error) {
			image, err := devtools.CrossBuildImage(platform)
			if err != nil {
				return "", err
			}
			// Outside CI (and without NPCAP_LOCAL), keep the stock image.
			if os.Getenv("CI") != "true" && os.Getenv("NPCAP_LOCAL") != "true" {
				return image, nil
			}
			// For Windows targets, swap in the private npcap-enabled builder.
			if platform == "windows/amd64" || platform == "windows/386" {
				image = strings.ReplaceAll(image, "beats-dev", "observability-ci") // Temporarily work around naming of npcap image.
				image = strings.ReplaceAll(image, "main", "npcap-"+NpcapVersion+"-debian9")
			}
			return image, nil
		}),
	)
}
// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
// Thin delegation to the shared dev-tools implementation.
func BuildGoDaemon() error {
	return devtools.BuildGoDaemon()
}
// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
// Thin delegation to the shared dev-tools implementation.
func CrossBuildGoDaemon() error {
	return devtools.CrossBuildGoDaemon()
}
// AssembleDarwinUniversal merges the darwin/amd64 and darwin/arm64 into a single
// universal binary using `lipo`. It assumes the darwin/amd64 and darwin/arm64
// were built and only performs the merge.
func AssembleDarwinUniversal() error {
	return build.AssembleDarwinUniversal()
}
// Package packages the Beat for distribution.
// Use SNAPSHOT=true to build snapshots.
// Use PLATFORMS to control the target platforms.
// Use VERSION_QUALIFIER to control the version qualifier.
func Package() {
	start := time.Now()
	defer func() { fmt.Println("package ran for", time.Since(start)) }()
	// Agent packaging ships a reduced artifact set.
	if v, found := os.LookupEnv("AGENT_PACKAGING"); found && v != "" {
		devtools.UseElasticBeatXPackReducedPackaging()
	} else {
		devtools.UseElasticBeatXPackPackaging()
	}
	devtools.PackageKibanaDashboardsFromBuildDir()
	packetbeat.CustomizePackaging()
	mg.Deps(Update)
	mg.Deps(CrossBuild, CrossBuildGoDaemon)
	mg.SerialDeps(devtools.Package, TestPackages)
}
// TestPackages tests the generated packages (i.e. file modes, owners, groups).
func TestPackages() error {
	return devtools.TestPackages()
}
// SystemTest runs the Go-based system tests under tests/system, fetching the
// Npcap installer first (a no-op on non-Windows hosts).
func SystemTest(ctx context.Context) error {
	mg.SerialDeps(getNpcapInstaller, devtools.BuildSystemTestBinary)
	args := devtools.DefaultGoTestIntegrationArgs()
	args.Packages = []string{"./tests/system/..."}
	return devtools.GoTest(ctx, args)
}
// getNpcapInstaller gets the installer from the Google Cloud Storage service.
//
// On Windows platforms, if getNpcapInstaller is invoked with the environment variables
// CI or NPCAP_LOCAL set to "true" and the OEM Npcap installer is not available it is
// obtained from the cloud storage. This behaviour requires access to the private store.
// If NPCAP_LOCAL is set to "true" and the file is in the npcap/installer directory, no
// fetch will be made.
func getNpcapInstaller() error {
	// TODO: Consider whether to expose this as a target.
	if runtime.GOOS != "windows" {
		// The installer is only needed for Windows system tests.
		return nil
	}
	if os.Getenv("CI") != "true" && os.Getenv("NPCAP_LOCAL") != "true" {
		return errors.New("only available if running in the CI or with NPCAP_LOCAL=true")
	}
	dstPath := filepath.Join("./npcap/installer", installer)
	if os.Getenv("NPCAP_LOCAL") == "true" {
		// Prefer an already-present local copy; only fetch when missing.
		fi, err := os.Stat(dstPath)
		if err == nil && !fi.IsDir() {
			fmt.Println("using local Npcap installer with NPCAP_LOCAL=true")
			return nil
		}
		// Any stat error other than "not exist" is a real failure.
		if !errors.Is(err, fs.ErrNotExist) {
			return err
		}
	}
	fmt.Printf("getting %s from private cache\n", installer)
	return sh.RunV("gsutil", "cp", "gs://obs-ci-cache/private/"+installer, dstPath)
}
| [
"\"CI\"",
"\"NPCAP_LOCAL\"",
"\"CI\"",
"\"NPCAP_LOCAL\"",
"\"NPCAP_LOCAL\""
]
| []
| [
"NPCAP_LOCAL",
"CI"
]
| [] | ["NPCAP_LOCAL", "CI"] | go | 2 | 0 | |
db_test.go | package sqlreflect
import (
"database/sql"
"fmt"
"os"
"testing"
"github.com/Masterminds/squirrel"
_ "github.com/lib/pq"
)
// These are functional tests.
// To point these toward a valid database, set $SQLREFLECT_DB in your
// environment.
//
// SQLREFLECT_DB="user=foo dbname=bar" go test ./...
const tCatalog = "sqlreflect"
var dbConnStr = "user=mbutcher dbname=sqlreflect sslmode=disable"
var dbDriverStr = "postgres"
var db *sql.DB
// TestMain opens the database connection (overridable via $SQLREFLECT_DB),
// creates the fixture schema, runs the test suite, and tears the schema
// back down before exiting with the suite's status code.
func TestMain(m *testing.M) {
	if len(sql.Drivers()) == 0 {
		fmt.Println("No database drivers for testing")
		os.Exit(1)
	}
	// Allow overriding the default connection string from the environment.
	if cstr := os.Getenv("SQLREFLECT_DB"); len(cstr) > 0 {
		dbConnStr = cstr
	}
	c, err := sql.Open(dbDriverStr, dbConnStr)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// sql.Open is lazy; Ping forces an actual connection attempt.
	if err := c.Ping(); err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	db = c
	if err := setup(c); err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	// Run the tests
	exit := m.Run()
	if err := teardown(c); err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	c.Close()
	os.Exit(exit)
}
// TestSchemaInfo checks basic connectivity and that the configured driver is
// supported by the SchemaInfo implementation.
func TestSchemaInfo(t *testing.T) {
	if err := db.Ping(); err != nil {
		t.Error("failed ping")
	}
	opts := NewDBOptions(db, "postgres")
	si := New(opts)
	if !si.Supported() {
		t.Fatal("Unsupported database")
	}
}
// TestSchemaInfo_Tables exercises table listing both unscoped and scoped to
// the test catalog, and checks that the fixture table "person" is present.
func TestSchemaInfo_Tables(t *testing.T) {
	si := New(DBOptions{Driver: "postgres", Queryer: squirrel.NewStmtCacheProxy(db)})
	tables, err := si.Tables("", "")
	if err != nil {
		t.Fatal(err)
	}
	// An unscoped listing may include tables beyond our fixtures, so only a
	// lower bound is asserted here.
	if n := len(tables); n < len(createTables()) {
		t.Errorf("Unexpected number of tables: %d", n)
	}
	// Try again, but with DB name set.
	tables2, err := si.Tables(tCatalog, "")
	if err != nil {
		t.Fatal(err)
	}
	// Scoped to the test catalog the count must match the fixtures exactly;
	// on mismatch, log what was actually returned to aid debugging.
	if n := len(tables2); n != len(createTables()) {
		t.Errorf("Unexpected number of tables: %d", n)
		for _, ttt := range tables2 {
			t.Logf("%s.%s", ttt.TableCatalog, ttt.TableNameField)
		}
	}
	found := false
	for _, tt := range tables {
		if tt.TableNameField == "person" {
			found = true
		}
	}
	if !found {
		t.Error("Did not find table 'person'")
	}
}
// TestSchemaInfo_Table verifies single-table lookup by name and that the
// table lookup does not accidentally resolve views.
func TestSchemaInfo_Table(t *testing.T) {
	si := New(DBOptions{Driver: "postgres", Queryer: squirrel.NewStmtCacheProxy(db)})

	table, err := si.Table("person", tCatalog, "")
	if err != nil {
		t.Fatal(err)
	}
	if table.TableNameField != "person" {
		// Fatalf (not Fatal) so the %q verb is actually formatted.
		t.Fatalf("Expected person, got %q", table.TableNameField)
	}

	// Make sure we can't accidentally look up a view with this command.
	if _, err = si.Table("person_name", tCatalog, ""); err == nil {
		t.Fatal("Expected to fail table lookup of a view.")
	}
}
// TestSchemaInfo_Views checks that an unscoped view listing returns exactly
// the fixture views.
func TestSchemaInfo_Views(t *testing.T) {
	si := New(DBOptions{Driver: "postgres", Queryer: squirrel.NewStmtCacheProxy(db)})
	views, err := si.Views("", "")
	if err != nil {
		t.Fatal(err)
	}
	if n := len(views); n != len(createViews()) {
		t.Errorf("Unexpected number of tables: %d", n)
	}
	// TODO: Should probably create a view in the schema creation and test it
	// here.
}
// TestSchemaInfo_View verifies single-view lookup by name.
func TestSchemaInfo_View(t *testing.T) {
	si := New(DBOptions{Driver: "postgres", Queryer: squirrel.NewStmtCacheProxy(db)})
	table, err := si.View("person_name", tCatalog, "")
	if err != nil {
		t.Fatal(err)
	}
	if table.TableNameField != "person_name" {
		// Fatalf (not Fatal) so the %q verb is actually formatted; the
		// message also now names the view we expected, not "person".
		t.Fatalf("Expected person_name, got %q", table.TableNameField)
	}
}
// setup creates the fixture tables and then the fixture views. On any
// failure it tears down whatever was already created and returns an error
// identifying the failing statement.
func setup(db *sql.DB) error {
	// Tables first, then views (views depend on the tables).
	stmts := append(createTables(), createViews()...)
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			fmt.Println("Setup failed. Cleaning up")
			teardown(db)
			return fmt.Errorf("Statement %q failed: %s", stmt, err)
		}
	}
	return nil
}
// teardown drops the fixture views and tables. Every statement is attempted
// even when earlier ones fail; the last error encountered is returned.
func teardown(db *sql.DB) error {
	var last error
	// Views first: they depend on the tables.
	for _, s := range dropViews() {
		if _, err := db.Exec(s); err != nil {
			last = fmt.Errorf("Statement %q failed: %s", s, err)
			fmt.Println(last)
		}
	}
	for _, s := range dropTables() {
		if _, err := db.Exec(s); err != nil {
			last = fmt.Errorf("Statement %q failed: %s", s, err)
			fmt.Println(last)
		}
	}
	return last
}
// createTables returns the fixture DDL in dependency order: person must
// exist before org (president FK), and org before employees.
func createTables() []string {
	return []string{
		`CREATE TABLE person (
		id SERIAL,
		first_name VARCHAR DEFAULT '',
		last_name VARCHAR DEFAULT '',
		PRIMARY KEY (id)
	)`,
		`CREATE TABLE org (
		id SERIAL,
		name VARCHAR DEFAULT '',
		president INTEGER DEFAULT 0 REFERENCES person(id) ON DELETE SET NULL,
		PRIMARY KEY (id)
	)`,
		`CREATE TABLE employees (
		id SERIAL,
		org INTEGER DEFAULT 0 REFERENCES org(id),
		-- Docs suggest this will use primary key. Useful for testing.
		person INTEGER DEFAULT 0 REFERENCES person,
		PRIMARY KEY (id)
	)`,
	}
}
// createViews returns DDL for the fixture views (created after the tables,
// since person_name selects from person).
func createViews() []string {
	return []string{
		`CREATE VIEW person_name AS
		SELECT concat(first_name, last_name) AS full_name FROM person;
	`,
	}
}
// dropTables returns teardown DDL in reverse dependency order so foreign-key
// constraints do not block the drops.
func dropTables() []string {
	return []string{
		`DROP TABLE employees`,
		`DROP TABLE org`,
		`DROP TABLE person`,
	}
}
// dropViews returns teardown DDL for the fixture views.
func dropViews() []string {
	return []string{
		`DROP VIEW person_name`,
	}
}
| [
"\"SQLREFLECT_DB\""
]
| []
| [
"SQLREFLECT_DB"
]
| [] | ["SQLREFLECT_DB"] | go | 1 | 0 | |
obj/obj_export_obj_test.go | package obj
import (
"os"
"testing"
)
// TestExportObjFile is a developer-only smoke test for exportObjFile. It is
// gated behind SINGLE_TEST=1 because it writes into ../eq/tmp, and it ends
// with a deliberate Fatalf to dump the object during manual runs.
func TestExportObjFile(t *testing.T) {
	if os.Getenv("SINGLE_TEST") != "1" {
		// Skip (rather than silently pass) so the gate shows up in output.
		t.Skip("set SINGLE_TEST=1 to run this test")
	}
	obj := &ObjData{}
	err := exportObjFile(obj, "../eq/tmp/out.obj")
	if err != nil {
		t.Fatalf("exportObjFile: %s", err)
	}
	// Intentional failure: dumps the exported object for manual inspection.
	t.Fatalf("%+v", obj)
}
| [
"\"SINGLE_TEST\""
]
| []
| [
"SINGLE_TEST"
]
| [] | ["SINGLE_TEST"] | go | 1 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# Pymote documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 07 19:26:40 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# On Read the Docs build servers, pull RTD-specific overrides from a local
# readthedocs.py module. '.' is pushed onto sys.path only for the import and
# popped right after so later imports are unaffected.
if os.environ.get('READTHEDOCS', None) == 'True':
    sys.path.insert(0,'.')
    from readthedocs import *
    sys.path.pop(0)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary','sphinx.ext.todo','sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode']
# generate autosummary pages
autosummary_generate = True
# include class and __init__ docstring
autoclass_content = 'both'
#http://sphinx-doc.org/latest/ext/autodoc.htm#confval-autodoc_member_order
autodoc_member_order = 'alphabetical'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pymote'
copyright = u'2011-2013, Damir Arbula'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
    from pymote import __version__
except ImportError:
    # avoid pymote requirements on rtd server: read the version straight out
    # of pymote/release.py without importing the package itself
    sys.path.append(os.path.abspath(os.path.join('..', 'pymote')))
    tmp = __import__('release', globals(), locals(), 'version', 0)
    sys.path.pop()
    __version__ = tmp.version
version = __version__
# The full version, including alpha/beta/rc tags (underscores stripped).
release = __version__.replace('_','')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pymotedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pymote.tex', u'Pymote Documentation',
u'Damir Arbula', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymote', u'Pymote Documentation',
[u'Damir Arbula'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pymote', u'Pymote Documentation',
u'Damir Arbula', 'Pymote', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
testcases/OpenStack/vPing/vPing_userdata.py | #!/usr/bin/python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1: This script boots the VM1 and allocates IP address from Nova
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
# 0.2: measure test duration and publish results under json format
# 0.3: adapt push 2 DB after Test API refacroting
#
#
import argparse
import datetime
import os
import pprint
import sys
import time
import yaml
from novaclient import client as novaclient
from neutronclient.v2_0 import client as neutronclient
from keystoneclient.v2_0 import client as keystoneclient
from glanceclient import client as glanceclient
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils
# Module-level setup: CLI parsing, logging, and test parameters loaded from
# the functest YAML config. Requires the environment variables 'repos_dir',
# 'CONFIG_FUNCTEST_YAML' and 'HOME' to be set.
pp = pprint.PrettyPrinter(indent=4)
parser = argparse.ArgumentParser()
# Mutated to True by main() when the Glance image already exists.
image_exists = False
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
args = parser.parse_args()
""" logging configuration """
logger = ft_logger.Logger("vping_userdata").getLogger()
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)
f.close()
HOME = os.environ['HOME'] + "/"
# vPing parameters
VM_BOOT_TIMEOUT = 180
VM_DELETE_TIMEOUT = 100
PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
TEST_DB = functest_yaml.get("results").get("test_db_url")
NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
GLANCE_IMAGE_NAME = functest_yaml.get("vping").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get(
    "openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get(
    "openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
FLAVOR = functest_yaml.get("vping").get("vm_flavor")
# NEUTRON Private Network parameters
PRIVATE_NET_NAME = functest_yaml.get("vping").get(
    "vping_private_net_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("vping").get(
    "vping_private_subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("vping").get(
    "vping_private_subnet_cidr")
ROUTER_NAME = functest_yaml.get("vping").get("vping_router_name")
SECGROUP_NAME = functest_yaml.get("vping").get("vping_sg_name")
SECGROUP_DESCR = functest_yaml.get("vping").get("vping_sg_descr")
def pMsg(value):
    """Pretty-print an arbitrary value using the module-level printer."""
    pp.pprint(value)
def waitVmActive(nova, vm):
    """Poll *vm* until it becomes ACTIVE.

    Returns True when the instance reaches ACTIVE, and False when it
    enters ERROR or when roughly VM_BOOT_TIMEOUT seconds elapse without
    a final state.

    Fixes: the original ended with an unreachable ``return False`` after
    a ``while True`` loop; the polling loop is now explicitly bounded.
    """
    sleep_time = 3
    # Bound the number of polls so we give up after ~VM_BOOT_TIMEOUT seconds.
    for _ in range(int(VM_BOOT_TIMEOUT / sleep_time) + 1):
        status = openstack_utils.get_instance_status(nova, vm)
        logger.debug("Status: %s" % status)
        if status == "ACTIVE":
            return True
        if status in ("ERROR", "error"):
            return False
        time.sleep(sleep_time)
    logger.debug("Booting a VM timed out...")
    return False
def waitVmDeleted(nova, vm):
    """Poll *vm* until Nova no longer reports a status for it.

    Returns True once the instance is gone, and False after roughly
    VM_DELETE_TIMEOUT seconds without the deletion completing.

    Fixes: the original ended with an unreachable ``return False`` after
    a ``while True`` loop and carried dead commented-out code.
    """
    sleep_time = 3
    for _ in range(int(VM_DELETE_TIMEOUT / sleep_time) + 1):
        # get_instance_status() returns a falsy value once the VM is deleted.
        if not openstack_utils.get_instance_status(nova, vm):
            return True
        time.sleep(sleep_time)
    logger.debug("Timeout")
    return False
def create_security_group(neutron_client):
    """Ensure the vPing security group exists with ICMP and SSH rules.

    Returns the security group id on success, or False if creating the
    group or any of its rules fails.
    """
    group_id = openstack_utils.get_security_group_id(
        neutron_client, SECGROUP_NAME)
    if group_id != '':
        logger.info("Using existing security group '%s'..." % SECGROUP_NAME)
    else:
        logger.info("Creating security group '%s'..." % SECGROUP_NAME)
        group = openstack_utils.create_security_group(
            neutron_client, SECGROUP_NAME, SECGROUP_DESCR)
        if not group:
            logger.error("Failed to create the security group...")
            return False
        group_id = group['id']
        logger.debug("Security group '%s' with ID=%s created successfully."
                     % (group['name'], group_id))
    logger.debug("Adding ICMP rules in security group '%s'..."
                 % SECGROUP_NAME)
    if not openstack_utils.create_secgroup_rule(
            neutron_client, group_id, 'ingress', 'icmp'):
        logger.error("Failed to create the security group rule...")
        return False
    logger.debug("Adding SSH rules in security group '%s'..."
                 % SECGROUP_NAME)
    if not openstack_utils.create_secgroup_rule(
            neutron_client, group_id, 'ingress', 'tcp', '22', '22'):
        logger.error("Failed to create the security group rule...")
        return False
    if not openstack_utils.create_secgroup_rule(
            neutron_client, group_id, 'egress', 'tcp', '22', '22'):
        logger.error("Failed to create the security group rule...")
        return False
    return group_id
def main():
    """Run the vPing userdata scenario end to end.

    Boots VM1, then boots VM2 with a cloud-init userdata script that pings
    VM1 and prints "vPing OK" to the console on success. Watches VM2's
    console output until success, timeout, or evidence that userdata is
    unsupported, optionally pushes the result to the test DB, and calls
    exit() with the final code (0 OK, -1 failure, -2 userdata unsupported).
    """
    # Build one client per OpenStack service from the environment credentials.
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1
    image_id = None
    flavor = None
    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))
    # Private network + subnet + router the two VMs will share.
    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)
    network_id = network_dic["net_id"]
    create_security_group(neutron_client)
    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Flavor found '%s'" % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        pMsg(nova_client.flavor.list())
        exit(-1)
    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()
    # boot VM 1
    # basic boot
    # tune (e.g. flavor, images, network) to your specific
    # openstack configuration here
    # we consider start time at VM1 booting
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))
    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        config_drive=True,
        nics=[{"net-id": network_id}]
    )
    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)
    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))
    # boot VM 2
    # we will boot then execute a ping script with cloud-init
    # the long chain corresponds to the ping procedure converted with base 64
    # tune (e.g. flavor, images, network) to your specific openstack
    # configuration here
    # Shell script executed by cloud-init inside VM2; it loops until a ping
    # to VM1 succeeds and then prints the "vPing OK" marker on the console.
    u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n "
         "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n "
         "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip)
    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
        "\n userdata= \n%s" % (
            NAME_VM_2, flavor, image_id, network_id, u))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}],
        config_drive=True,
        userdata=u
    )
    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)
    logger.info("Waiting for ping...")
    # Watch VM2's console output for the success marker, a metadata failure,
    # or the configured PING_TIMEOUT (polled once per second).
    sec = 0
    metadata_tries = 0
    console_log = vm2.get_console_output()
    duration = 0
    stop_time = time.time()
    while True:
        time.sleep(1)
        console_log = vm2.get_console_output()
        # print "--"+console_log
        # report if the test is failed
        if "vPing OK" in console_log:
            logger.info("vPing detected!")
            # we consider start time at VM1 booting
            stop_time = time.time()
            duration = round(stop_time - start_time, 1)
            logger.info("vPing duration:'%s'" % duration)
            EXIT_CODE = 0
            break
        elif ("failed to read iid from metadata" in console_log or
              metadata_tries > 5):
            EXIT_CODE = -2
            break
        elif sec == PING_TIMEOUT:
            logger.info("Timeout reached.")
            break
        elif sec % 10 == 0:
            if "request failed" in console_log:
                logger.debug("It seems userdata is not supported in "
                             "nova boot. Waiting a bit...")
                metadata_tries += 1
            else:
                logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1
    test_status = "NOK"
    if EXIT_CODE == 0:
        logger.info("vPing OK")
        test_status = "OK"
    elif EXIT_CODE == -2:
        duration = 0
        logger.info("Userdata is not supported in nova boot. Aborting test...")
    else:
        duration = 0
        logger.error("vPing FAILED")
    if args.report:
        try:
            logger.debug("Pushing vPing userdata results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_userdata",
                                              logger,
                                              start_time,
                                              stop_time,
                                              test_status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': test_status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])
    exit(EXIT_CODE)
if __name__ == '__main__':
    # Script entry point; main() terminates the process via exit() with
    # the test result code.
    main()
| []
| []
| [
"repos_dir",
"CONFIG_FUNCTEST_YAML",
"HOME"
]
| [] | ["repos_dir", "CONFIG_FUNCTEST_YAML", "HOME"] | python | 3 | 0 | |
api.py | import uuid
from flask import Flask, render_template, request, jsonify
import os
from lotify.client import Client
app = Flask(__name__)
# LINE Notify OAuth credentials, read from the environment (.env via
# load_dotenv in the imports above).
CLIENT_ID = os.getenv("LINE_CLIENT_ID")
SECRET = os.getenv("LINE_CLIENT_SECRET")
URI = os.getenv("LINE_REDIRECT_URI")
# Shared lotify client used by every route below.
lotify = Client(client_id=CLIENT_ID, client_secret=SECRET, redirect_uri=URI)
# -- Routes -------------------------------------------------------------
@app.route("/")
def home():
    # Landing page with a freshly generated LINE Notify authorization
    # link; a uuid4 serves as the OAuth "state" token.
    auth_link = lotify.get_auth_link(state=uuid.uuid4())
    return render_template("notify_index.html", auth_url=auth_link)
@app.route("/callback")
def confirm():
    # OAuth callback: exchange the authorization code for an access token
    # and show it on the confirmation page.
    access_token = lotify.get_access_token(code=request.args.get("code"))
    return render_template("notify_confirm.html", token=access_token)
@app.route("/notify/send", methods=["POST"])
def send():
    # Deliver a plain-text notification using the token from the JSON body.
    body = request.get_json()
    result = lotify.send_message(
        access_token=body.get("token"), message=body.get("message")
    )
    return jsonify(result=result.get("message")), result.get("status")
@app.route("/notify/send/sticker", methods=["POST"])
def send_sticker():
    # Send a notification carrying a fixed LINE sticker (package 4,
    # sticker 630) together with the caller-supplied message.
    body = request.get_json()
    result = lotify.send_message_with_sticker(
        access_token=body.get("token"),
        message=body.get("message"),
        sticker_id=630,
        sticker_package_id=4,
    )
    return jsonify(result=result.get("message")), result.get("status")
@app.route("/notify/send/url", methods=["POST"])
def send_url():
    # Send a notification with an image referenced by URL; the same URL is
    # used for both the thumbnail and the full-size image.
    body = request.get_json()
    result = lotify.send_message_with_image_url(
        access_token=body.get("token"),
        message=body.get("message"),
        image_fullsize=body.get("url"),
        image_thumbnail=body.get("url"),
    )
    return jsonify(result=result.get("message")), result.get("status")
@app.route("/notify/send/path", methods=["POST"])
def send_file():
    """Send a notification with an image uploaded from a local file.

    The file handle is opened in a ``with`` block so it is always closed,
    even if the Notify API call raises; the original code leaked the
    handle by passing ``open(...)`` inline.
    """
    payload = request.get_json()
    with open("./test_data/dog.png", "rb") as image:
        response = lotify.send_message_with_image_file(
            access_token=payload.get("token"),
            message=payload.get("message"),
            file=image,
        )
    return jsonify(result=response.get("message")), response.get("status")
@app.route("/notify/revoke", methods=["POST"])
def revoke():
    # Invalidate the caller's access token so no further notifications can
    # be sent with it.
    body = request.get_json()
    result = lotify.revoke(access_token=body.get("token"))
    return jsonify(result=result.get("message")), result.get("status")
if __name__ == "__main__":
    # Development entry point only: debug=True enables the interactive
    # debugger and auto-reload, and must not be used in production.
    app.run(host="0.0.0.0", port=5000, debug=True)
| []
| []
| [
"LINE_CLIENT_ID",
"LINE_CLIENT_SECRET",
"LINE_REDIRECT_URI"
]
| [] | ["LINE_CLIENT_ID", "LINE_CLIENT_SECRET", "LINE_REDIRECT_URI"] | python | 3 | 0 | |
examples/http_server/http_server.go | package main
import (
"github.com/datatyp/sarama"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
// Command line flags. The broker list defaults to the KAFKA_PEERS
// environment variable so the example can run without arguments when that
// variable is set.
var (
	addr      = flag.String("addr", ":8080", "The address to bind to")
	brokers   = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
	verbose   = flag.Bool("verbose", false, "Turn on Sarama logging")
	certFile  = flag.String("certificate", "", "The optional certificate file for client authentication")
	keyFile   = flag.String("key", "", "The optional key file for client authentication")
	caFile    = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
	verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
)
// main parses the command line flags, connects the two Kafka producers,
// and serves HTTP until ListenAndServe fails.
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
	}

	if *brokers == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	brokerList := strings.Split(*brokers, ",")
	log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))

	server := &Server{
		DataCollector:     newDataCollector(brokerList),
		AccessLogProducer: newAccessLogProducer(brokerList),
	}

	// Run only returns on failure. Close the producers explicitly before
	// exiting: the original used `defer server.Close()` followed by
	// log.Fatal, but log.Fatal calls os.Exit, which skips deferred
	// functions, so the producers were never closed.
	err := server.Run(*addr)
	if closeErr := server.Close(); closeErr != nil {
		log.Println("Failed to close server", closeErr)
	}
	log.Fatal(err)
}
// createTlsConfiguration builds a *tls.Config from the -certificate, -key
// and -ca flags. It returns nil when client authentication is not fully
// configured, which callers treat as "TLS disabled".
func createTlsConfiguration() (t *tls.Config) {
	// TLS is opt-in: all three files must be provided.
	if *certFile == "" || *keyFile == "" || *caFile == "" {
		// will be nil by default if nothing is provided
		return t
	}

	cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
	if err != nil {
		log.Fatal(err)
	}

	caCert, err := ioutil.ReadFile(*caFile)
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caCert)

	return &tls.Config{
		Certificates:       []tls.Certificate{cert},
		RootCAs:            pool,
		InsecureSkipVerify: *verifySsl,
	}
}
// Server bundles the two Kafka producers behind the HTTP frontend: a
// synchronous producer for user-submitted data and an asynchronous one for
// access log entries.
type Server struct {
	DataCollector     sarama.SyncProducer
	AccessLogProducer sarama.AsyncProducer
}
// Close shuts down both producers. It logs each failure and returns the
// first error encountered, so callers learn about an unclean shutdown;
// the original always returned nil, defeating the caller's error check.
func (s *Server) Close() error {
	var firstErr error
	if err := s.DataCollector.Close(); err != nil {
		log.Println("Failed to shut down data collector cleanly", err)
		firstErr = err
	}

	if err := s.AccessLogProducer.Close(); err != nil {
		log.Println("Failed to shut down access log producer cleanly", err)
		if firstErr == nil {
			firstErr = err
		}
	}

	return firstErr
}
// Handler returns the server's root HTTP handler: the query-string data
// collector wrapped in the access-logging middleware.
func (s *Server) Handler() http.Handler {
	return s.withAccessLog(s.collectQueryStringData())
}
// Run starts an HTTP server bound to addr and blocks until it fails.
func (s *Server) Run(addr string) error {
	log.Printf("Listening for requests on %s...\n", addr)
	httpServer := &http.Server{
		Addr:    addr,
		Handler: s.Handler(),
	}
	return httpServer.ListenAndServe()
}
// collectQueryStringData returns a handler that synchronously publishes
// the raw query string of requests to "/" onto the "important" Kafka
// topic and reports the resulting topic/partition/offset to the client.
func (s *Server) collectQueryStringData() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}

		// No message key is set, so messages are distributed randomly
		// over the topic's partitions.
		msg := &sarama.ProducerMessage{
			Topic: "important",
			Value: sarama.StringEncoder(r.URL.RawQuery),
		}
		partition, offset, err := s.DataCollector.SendMessage(msg)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			fmt.Fprintf(w, "Failed to store your data:, %s", err)
			return
		}

		// The tuple (topic, partition, offset) uniquely identifies a
		// message in a Kafka cluster.
		fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
	})
}
// accessLogEntry is one access-log record. It acts as a sarama value
// encoder with lazy, cached JSON serialization: the first call to Length
// or Encode marshals the entry once into the unexported fields.
type accessLogEntry struct {
	Method       string  `json:"method"`
	Host         string  `json:"host"`
	Path         string  `json:"path"`
	IP           string  `json:"ip"`
	ResponseTime float64 `json:"response_time"`

	encoded []byte
	err     error
}

// ensureEncoded marshals the entry to JSON once and caches the result
// (or the marshaling error) for subsequent Length/Encode calls.
func (ale *accessLogEntry) ensureEncoded() {
	if ale.encoded == nil && ale.err == nil {
		ale.encoded, ale.err = json.Marshal(ale)
	}
}

// Length reports the byte length of the JSON encoding.
func (ale *accessLogEntry) Length() int {
	ale.ensureEncoded()
	return len(ale.encoded)
}

// Encode returns the cached JSON encoding and any marshaling error.
func (ale *accessLogEntry) Encode() ([]byte, error) {
	ale.ensureEncoded()
	return ale.encoded, ale.err
}
// withAccessLog is middleware that times each request and asynchronously
// publishes an accessLogEntry to the "access_log" topic after the wrapped
// handler finishes.
func (s *Server) withAccessLog(next http.Handler) http.Handler {

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		started := time.Now()

		next.ServeHTTP(w, r)

		entry := &accessLogEntry{
			Method:       r.Method,
			Host:         r.Host,
			Path:         r.RequestURI,
			IP:           r.RemoteAddr,
			ResponseTime: float64(time.Since(started)) / float64(time.Second),
		}

		// We will use the client's IP address as key. This will cause
		// all the access log entries of the same IP address to end up
		// on the same partition.
		s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
			Topic: "access_log",
			Key:   sarama.StringEncoder(r.RemoteAddr),
			Value: entry,
		}
	})
}
// newDataCollector builds the synchronous producer used for user-submitted
// data, configured for strong consistency (acks from all in-sync replicas,
// up to 10 retries). It exits the process if the producer cannot start.
func newDataCollector(brokerList []string) sarama.SyncProducer {

	// For the data collector, we are looking for strong consistency semantics.
	// Because we don't change the flush settings, sarama will try to produce messages
	// as fast as possible to keep latency low.
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	config.Producer.Return.Successes = true
	tlsConfig := createTlsConfiguration()
	if tlsConfig != nil {
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}

	// On the broker side, you may want to change the following settings to get
	// stronger consistency guarantees:
	// - For your broker, set `unclean.leader.election.enable` to false
	// - For the topic, you could increase `min.insync.replicas`.

	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	return producer
}
// newAccessLogProducer builds the asynchronous producer used for access
// logs, tuned for throughput (leader-only acks, snappy compression,
// 500ms flush batches). Delivery errors are logged from a background
// goroutine. It exits the process if the producer cannot start.
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {

	// For the access log, we are looking for AP semantics, with high throughput.
	// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
	config := sarama.NewConfig()
	tlsConfig := createTlsConfiguration()
	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}
	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms

	producer, err := sarama.NewAsyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	// We will just log to STDOUT if we're not able to produce messages.
	// Note: messages will only be returned here after all retry attempts are exhausted.
	go func() {
		for err := range producer.Errors() {
			log.Println("Failed to write access log entry:", err)
		}
	}()

	return producer
}
| [
"\"KAFKA_PEERS\""
]
| []
| [
"KAFKA_PEERS"
]
| [] | ["KAFKA_PEERS"] | go | 1 | 0 | |
bot.py | import os
import random
from discord.ext import commands
from dotenv import load_dotenv
import discord
from googletrans import Translator
import langdict
# Module-level setup: read the Discord token from .env, prompt the operator
# for the command prefix and status line, build the bot, and load the cogs.
translator = Translator()
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
cogs = ['cogs.Essentials', 'cogs.Moderation', 'cogs.Fun']
print("Startup Command Recieved")
# Interactive configuration: blocks until the operator answers both prompts.
prefix = input("Please Enter the Designated Command Prefix: " )
status= input("Please Enter the Designated Bot Status: ")
status = "Prefix: "+prefix+" | "+status
bot = commands.Bot(command_prefix=prefix,owner_id=379786876204220417,case_insensitive=True)
#bot.change_presence(activity=discord.Game("something")
for cog in cogs:
    bot.load_extension(cog)
    print("Loaded "+cog)
print("")
@bot.event
async def on_ready():
    # Publish the configured status line, then report every guild the bot
    # is connected to before announcing readiness.
    await bot.change_presence(status=discord.Status.online,
                              activity=discord.Game(status))
    for guild in bot.guilds:
        print(
            f'{bot.user} is connected to the following guild:\n'
            f'{guild.name}(id: {guild.id})'
        )
    print("Bot Started. Welcome Developer. ")
#welcome
@bot.event
async def on_member_join(member):
    # Greet each new member via DM. The original greeting was a plain
    # string literal, so users received the literal text "{member.name}";
    # the f-string prefix makes the interpolation actually happen.
    await member.send(f'Hi {member.name}, welcome to the Discord server!')
#commands
@bot.command(name='members', help='list all members in the server')
async def members(ctx):
    """Reply with the name of every member of the invoking guild.

    The command context already carries the guild object, so there is no
    need to scan bot.guilds for a matching id (the original loop silently
    reused the last guild when no id matched, and raised AttributeError
    when invoked from a DM where ctx.guild is None).
    """
    guild = ctx.guild
    if guild is None:
        # Invoked from a DM: there is no member list to show.
        await ctx.send('This command only works inside a server.')
        return
    members = '\n - '.join(member.name for member in guild.members)
    await ctx.send(f'``` Members:\n - {members}```')
#translate command
@bot.command(name='translate', help = 'translates text to a desired language. Do '+prefix+'help translate for more info \n To use, first type the desired language and then type the sentence to translate in quotation marks (ex: "i am a person") \n For list of codes, go to \n https://py-googletrans.readthedocs.io/en/latest/#googletrans-languages')
async def translate(ctx, desired, text):
    # Translate *text* into the *desired* language code, reporting the
    # detected source language alongside the translated text.
    result = translator.translate(text, dest=desired).text
    source_lang = translator.detect(text).lang
    await ctx.send('` Translation from ' + source_lang + ' to ' + desired +
                   '\n Translated Text:`' + result)
#rock paper scissors
'''
@bot.command(name='temp')
async def temp(ctx):
await ctx.send("Someone post this on another discord")
await ctx.send(file=discord.File('someone post this in another discord.jpg'))
'''
#hello and happ bday
@bot.event
async def on_message(message):
    # Never react to the bot's own messages.
    if message.author == bot.user:
        return
    if message.content == 'hello':
        greetings = ["Wassup Dude", "Hi. How are you?",
                     "helllo from the other side",
                     "Gday Bro", "Hello", "hi"]
        await message.channel.send(random.choice(greetings))
    if 'happy birthday' in message.content.lower():
        await message.channel.send('Happy Birthday! Congrats! ')
    # Overriding on_message suppresses command dispatch in discord.py, so
    # commands must be forwarded explicitly.
    await bot.process_commands(message)
#autotranslate
@bot.listen()
async def on_message(message):
    """Auto-translate non-English chat messages into English.

    Skips the bot's own messages, command invocations, and short texts.
    Fixes two defects in the original: ``text[0]`` raised IndexError for
    messages with empty content (e.g. attachment-only messages), and the
    translation API was called even for messages that were then discarded.
    """
    if message.author == bot.user:
        return
    text = message.content
    # Filter before paying for any translation API call: skip empty
    # content, command invocations, and messages of 5 characters or less.
    if not text or text[0] == prefix or len(text) <= 5:
        return
    detected_lang = translator.detect(text).lang
    if detected_lang.find("en") != -1:
        return
    translated = translator.translate(text).text
    await message.channel.send('`Automatic Translation from ' + detected_lang +
                               ' to english' +
                               '\n Translated Text:`' + translated)
'''
#automod
@bot.listen()
async def on_message(message):
if message.author==bot.user:
return
text = message.content
badwords = ['anal','anus','arse','ass','ballsack','balls','bastard','bitch','biatch','blowjob','blow job','bollock','bollok','boob','buttplug','clitoris','cock','coon','crap','cunt','dick','dildo','dyke','fag','feck','fellate','fellatio','felching','fuck','f u c k','fudgepacker','fudge packer','flange','homo','jerk','jizz','knobend','knob end','labia','muff','nigger','nigga','penis','piss','poop','prick','pube','pussy','queer','scrotum','sex','shit','s hit','sh1t','slut','smegma','spunk','tit','tosser'
'turd','twat','vagina','wank','whore','wtf', 'nibba', 's h i t']
for x in badwords:
if(x in text.lower()):
await message.channel.send("No Cursing PLS")
await message.delete()
return
'''
# Start the bot's event loop with the configured token; blocks until the
# bot is stopped.
bot.run(TOKEN)
| []
| []
| [
"DISCORD_TOKEN"
]
| [] | ["DISCORD_TOKEN"] | python | 1 | 0 | |
commands/pull_request.go | package commands
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/github/hub/git"
"github.com/github/hub/github"
"github.com/github/hub/utils"
)
// cmdPullRequest declares the "hub pull-request" command: its flag grammar
// (Usage) and the man-page style help text (Long) shown by `hub help`.
// The '' pairs in Long are hub's markup for literal/code spans.
var cmdPullRequest = &Command{
	Run: pullRequest,
	Usage: `
pull-request [-focpd] [-b <BASE>] [-h <HEAD>] [-r <REVIEWERS> ] [-a <ASSIGNEES>] [-M <MILESTONE>] [-l <LABELS>]
pull-request -m <MESSAGE> [--edit]
pull-request -F <FILE> [--edit]
pull-request -i <ISSUE>
`,
	Long: `Create a GitHub Pull Request.

## Options:
	-f, --force
		Skip the check for unpushed commits.

	-m, --message <MESSAGE>
		The text up to the first blank line in <MESSAGE> is treated as the pull
		request title, and the rest is used as pull request description in Markdown
		format.

		When multiple ''--message'' are passed, their values are concatenated with a
		blank line in-between.

		When neither ''--message'' nor ''--file'' were supplied, a text editor will open
		to author the title and description in.

	--no-edit
		Use the message from the first commit on the branch as pull request title
		and description without opening a text editor.

	-F, --file <FILE>
		Read the pull request title and description from <FILE>. Pass "-" to read
		from standard input instead. See ''--message'' for the formatting rules.

	-e, --edit
		Open the pull request title and description in a text editor before
		submitting. This can be used in combination with ''--message'' or ''--file''.

	-i, --issue <ISSUE>
		Convert <ISSUE> (referenced by its number) to a pull request.

		You can only convert issues authored by you or that which you have admin
		rights over. In most workflows it is not necessary to convert issues to
		pull requests; you can simply reference the original issue in the body of
		the new pull request.

	-o, --browse
		Open the new pull request in a web browser.

	-c, --copy
		Put the URL of the new pull request to clipboard instead of printing it.

	-p, --push
		Push the current branch to <HEAD> before creating the pull request.

	-b, --base <BASE>
		The base branch in the "[<OWNER>:]<BRANCH>" format. Defaults to the default
		branch of the upstream repository (usually "master").

		See the "CONVENTIONS" section of hub(1) for more information on how hub
		selects the defaults in case of multiple git remotes.

	-h, --head <HEAD>
		The head branch in "[<OWNER>:]<BRANCH>" format. Defaults to the currently
		checked out branch.

	-r, --reviewer <USERS>
		A comma-separated list (no spaces around the comma) of GitHub handles to
		request a review from.

	-a, --assign <USERS>
		A comma-separated list (no spaces around the comma) of GitHub handles to
		assign to this pull request.

	-M, --milestone <NAME>
		The milestone name to add to this pull request. Passing the milestone number
		is deprecated.

	-l, --labels <LABELS>
		A comma-separated list (no spaces around the comma) of labels to add to
		this pull request. Labels will be created if they do not already exist.

	-d, --draft
		Create the pull request as a draft.

	--no-maintainer-edits
		When creating a pull request from a fork, this disallows projects
		maintainers from being able to push to the head branch of this fork.
		Maintainer edits are allowed by default.

## Examples:
		$ hub pull-request
		[ opens a text editor for writing title and message ]
		[ creates a pull request for the current branch ]

		$ hub pull-request --base OWNER:master --head MYUSER:my-branch
		[ creates a pull request with explicit base and head branches ]

		$ hub pull-request --browse -m "My title"
		[ creates a pull request with the given title and opens it in a browser ]

		$ hub pull-request -F - --edit < path/to/message-template.md
		[ further edit the title and message received on standard input ]

## Configuration:

	* ''HUB_RETRY_TIMEOUT'':
		The maximum time to keep retrying after HTTP 422 on ''--push'' (default: 9).

## See also:

hub(1), hub-merge(1), hub-checkout(1)
`,
}
// init registers the pull-request command with the global command runner.
func init() {
	CmdRunner.Use(cmdPullRequest)
}
// pullRequest implements `hub pull-request`: it determines base and head
// branches/projects, assembles the title/body message, optionally pushes
// the head branch, creates the pull request via the API (retrying on a
// race with a fresh push), then applies labels, assignees, milestone and
// requested reviewers.
func pullRequest(cmd *Command, args *Args) {
	localRepo, err := github.LocalRepo()
	utils.Check(err)

	// The current branch may legitimately be undetermined (e.g. detached
	// HEAD); the error is only surfaced later, where the branch is needed.
	currentBranch, currentBranchErr := localRepo.CurrentBranch()

	baseProject, err := localRepo.MainProject()
	utils.Check(err)

	host, err := github.CurrentConfig().PromptForHost(baseProject.Host)
	if err != nil {
		utils.Check(github.FormatError("creating pull request", err))
	}
	client := github.NewClientWithHost(host)

	trackedBranch, headProject, _ := localRepo.RemoteBranchAndProject(host.User, false)
	if headProject == nil {
		utils.Check(fmt.Errorf("could not determine project for head branch"))
	}

	var (
		base, head string
	)

	// Explicit --base / --head flags take precedence and may carry an
	// "OWNER:branch" project prefix.
	if flagPullRequestBase := args.Flag.Value("--base"); flagPullRequestBase != "" {
		baseProject, base = parsePullRequestProject(baseProject, flagPullRequestBase)
	}
	if flagPullRequestHead := args.Flag.Value("--head"); flagPullRequestHead != "" {
		headProject, head = parsePullRequestProject(headProject, flagPullRequestHead)
	}

	baseRemote, _ := localRepo.RemoteForProject(baseProject)
	if base == "" && baseRemote != nil {
		base = localRepo.DefaultBranch(baseRemote).ShortName()
	}

	if head == "" && trackedBranch != nil {
		if !trackedBranch.IsRemote() {
			// the current branch tracking another branch
			// pretend there's no upstream at all
			trackedBranch = nil
		} else {
			if baseProject.SameAs(headProject) && base == trackedBranch.ShortName() {
				e := fmt.Errorf(`Aborted: head branch is the same as base ("%s")`, base)
				e = fmt.Errorf("%s\n(use `-h <branch>` to specify an explicit pull request head)", e)
				utils.Check(e)
			}
		}
	}

	force := args.Flag.Bool("--force")
	flagPullRequestPush := args.Flag.Bool("--push")

	if head == "" {
		if trackedBranch == nil {
			utils.Check(currentBranchErr)
			if !force && !flagPullRequestPush {
				// Guard against opening a PR from a branch that was
				// never pushed to any remote.
				branchRemote, branchMerge, err := branchTrackingInformation(currentBranch)
				if err != nil || (baseRemote != nil && branchRemote == baseRemote.Name && branchMerge.ShortName() == base) {
					if localRepo.RemoteForBranch(currentBranch, host.User) == nil {
						err = fmt.Errorf("Aborted: the current branch seems not yet pushed to a remote")
						err = fmt.Errorf("%s\n(use `-p` to push the branch or `-f` to skip this check)", err)
						utils.Check(err)
					}
				}
			}
			head = currentBranch.ShortName()
		} else {
			head = trackedBranch.ShortName()
		}
	}

	// Normalize the head project's owner/name in case the repository was
	// renamed or transferred on the server.
	if headRepo, err := client.Repository(headProject); err == nil {
		headProject.Owner = headRepo.Owner.Login
		headProject.Name = headRepo.Name
	}

	fullBase := fmt.Sprintf("%s:%s", baseProject.Owner, base)
	fullHead := fmt.Sprintf("%s:%s", headProject.Owner, head)

	// Refuse to continue when local commits are not on the tracked
	// remote branch yet (unless --force).
	if !force && trackedBranch != nil {
		remoteCommits, err := git.RefList(trackedBranch.LongName(), "")
		if err == nil && len(remoteCommits) > 0 {
			err = fmt.Errorf("Aborted: %d commits are not yet pushed to %s", len(remoteCommits), trackedBranch.LongName())
			err = fmt.Errorf("%s\n(use `-f` to force submit a pull request anyway)", err)
			utils.Check(err)
		}
	}

	messageBuilder := &github.MessageBuilder{
		Filename: "PULLREQ_EDITMSG",
		Title:    "pull request",
	}

	// Remote-qualified ref names (e.g. "origin/master") used for the
	// commit range computations below.
	baseTracking := base
	headTracking := head

	remote := baseRemote
	if remote != nil {
		baseTracking = fmt.Sprintf("%s/%s", remote.Name, base)
	}
	if remote == nil || !baseProject.SameAs(headProject) {
		remote, _ = localRepo.RemoteForProject(headProject)
	}
	if remote != nil {
		headTracking = fmt.Sprintf("%s/%s", remote.Name, head)
	}

	if flagPullRequestPush && remote == nil {
		utils.Check(fmt.Errorf("Can't find remote for %s", head))
	}

	messageBuilder.AddCommentedSection(fmt.Sprintf(`Requesting a pull to %s from %s
Write a message for this pull request. The first block
of text is the title and the rest is the description.`, fullBase, fullHead))

	flagPullRequestMessage := args.Flag.AllValues("--message")
	flagPullRequestEdit := args.Flag.Bool("--edit")
	flagPullRequestIssue := args.Flag.Value("--issue")
	if !args.Flag.HasReceived("--issue") && args.ParamsSize() > 0 {
		flagPullRequestIssue = parsePullRequestIssueNumber(args.GetParam(0))
	}

	// Decide where the PR message comes from: -m flags, a file (-F),
	// the last commit (--no-edit), or an interactive editor session.
	if len(flagPullRequestMessage) > 0 {
		messageBuilder.Message = strings.Join(flagPullRequestMessage, "\n\n")
		messageBuilder.Edit = flagPullRequestEdit
	} else if args.Flag.HasReceived("--file") {
		messageBuilder.Message, err = msgFromFile(args.Flag.Value("--file"))
		utils.Check(err)
		messageBuilder.Edit = flagPullRequestEdit
	} else if args.Flag.Bool("--no-edit") {
		commits, _ := git.RefList(baseTracking, head)
		if len(commits) == 0 {
			utils.Check(fmt.Errorf("Aborted: no commits detected between %s and %s", baseTracking, head))
		}
		message, err := git.Show(commits[len(commits)-1])
		utils.Check(err)
		messageBuilder.Message = message
	} else if flagPullRequestIssue == "" {
		messageBuilder.Edit = true

		headForMessage := headTracking
		if flagPullRequestPush {
			headForMessage = head
		}

		message := ""
		commits, _ := git.RefList(baseTracking, headForMessage)
		if len(commits) == 1 {
			// Seed the editor with the single commit's message, with
			// trailer lines stripped.
			message, err = git.Show(commits[0])
			utils.Check(err)
			re := regexp.MustCompile(`\n(Co-authored-by|Signed-off-by):[^\n]+`)
			message = re.ReplaceAllString(message, "")
		} else if len(commits) > 1 {
			commitLogs, err := git.Log(baseTracking, headForMessage)
			utils.Check(err)
			if commitLogs != "" {
				messageBuilder.AddCommentedSection("\nChanges:\n\n" + strings.TrimSpace(commitLogs))
			}
		}

		workdir, _ := git.WorkdirName()
		if workdir != "" {
			// Append the repository's pull request template, if any.
			template, _ := github.ReadTemplate(github.PullRequestTemplate, workdir)
			if template != "" {
				message = message + "\n\n\n" + template
			}
		}

		messageBuilder.Message = message
	}

	title, body, err := messageBuilder.Extract()
	utils.Check(err)

	if title == "" && flagPullRequestIssue == "" {
		utils.Check(fmt.Errorf("Aborting due to empty pull request title"))
	}

	if flagPullRequestPush {
		if args.Noop {
			args.Before(fmt.Sprintf("Would push to %s/%s", remote.Name, head), "")
		} else {
			err = git.Spawn("push", "--set-upstream", remote.Name, fmt.Sprintf("HEAD:%s", head))
			utils.Check(err)
		}
	}

	milestoneNumber, err := milestoneValueToNumber(args.Flag.Value("--milestone"), client, baseProject)
	utils.Check(err)

	var pullRequestURL string
	if args.Noop {
		args.Before(fmt.Sprintf("Would request a pull request to %s from %s", fullBase, fullHead), "")
		pullRequestURL = "PULL_REQUEST_URL"
	} else {
		params := map[string]interface{}{
			"base": base,
			"head": fullHead,
			"maintainer_can_modify": !args.Flag.Bool("--no-maintainer-edits"),
		}
		if args.Flag.Bool("--draft") {
			params["draft"] = true
		}

		if title != "" {
			params["title"] = title
			if body != "" {
				params["body"] = body
			}
		} else {
			// Attach the PR to an existing issue instead of a title.
			issueNum, _ := strconv.Atoi(flagPullRequestIssue)
			params["issue"] = issueNum
		}

		// After a fresh --push the server may briefly reject the head
		// ref; keep retrying for up to HUB_RETRY_TIMEOUT seconds
		// (default 9) with an increasing delay.
		startedAt := time.Now()
		numRetries := 0
		retryDelay := 2
		retryAllowance := 0
		if flagPullRequestPush {
			if allowanceFromEnv := os.Getenv("HUB_RETRY_TIMEOUT"); allowanceFromEnv != "" {
				retryAllowance, err = strconv.Atoi(allowanceFromEnv)
				utils.Check(err)
			} else {
				retryAllowance = 9
			}
		}

		var pr *github.PullRequest
		for {
			pr, err = client.CreatePullRequest(baseProject, params)
			if err != nil && strings.Contains(err.Error(), `Invalid value for "head"`) {
				if retryAllowance > 0 {
					retryAllowance -= retryDelay
					time.Sleep(time.Duration(retryDelay) * time.Second)
					retryDelay += 1
					numRetries += 1
				} else {
					if numRetries > 0 {
						duration := time.Since(startedAt)
						err = fmt.Errorf("%s\nGiven up after retrying for %.1f seconds.", err, duration.Seconds())
					}
					break
				}
			} else {
				break
			}
		}

		if err == nil {
			defer messageBuilder.Cleanup()
		}
		utils.Check(err)

		pullRequestURL = pr.HtmlUrl

		// Labels, assignees and milestone are set through the issue API.
		params = map[string]interface{}{}
		flagPullRequestLabels := commaSeparated(args.Flag.AllValues("--labels"))
		if len(flagPullRequestLabels) > 0 {
			params["labels"] = flagPullRequestLabels
		}
		flagPullRequestAssignees := commaSeparated(args.Flag.AllValues("--assign"))
		if len(flagPullRequestAssignees) > 0 {
			params["assignees"] = flagPullRequestAssignees
		}
		if milestoneNumber > 0 {
			params["milestone"] = milestoneNumber
		}

		if len(params) > 0 {
			err = client.UpdateIssue(baseProject, pr.Number, params)
			utils.Check(err)
		}

		flagPullRequestReviewers := commaSeparated(args.Flag.AllValues("--reviewer"))
		if len(flagPullRequestReviewers) > 0 {
			userReviewers := []string{}
			teamReviewers := []string{}
			for _, reviewer := range flagPullRequestReviewers {
				if strings.Contains(reviewer, "/") {
					// "ORG/team" entries request a team review.
					teamName := strings.SplitN(reviewer, "/", 2)[1]
					if !pr.HasRequestedTeam(teamName) {
						teamReviewers = append(teamReviewers, teamName)
					}
				} else if !pr.HasRequestedReviewer(reviewer) {
					userReviewers = append(userReviewers, reviewer)
				}
			}
			if len(userReviewers) > 0 || len(teamReviewers) > 0 {
				err = client.RequestReview(baseProject, pr.Number, map[string]interface{}{
					"reviewers":      userReviewers,
					"team_reviewers": teamReviewers,
				})
				utils.Check(err)
			}
		}
	}

	args.NoForward()
	printBrowseOrCopy(args, pullRequestURL, args.Flag.Bool("--browse"), args.Flag.Bool("--copy"))
}
// parsePullRequestProject interprets a "[OWNER[/REPO]:]BRANCH" flag
// value. It returns the project the value refers to plus the bare
// branch ref; without an owner prefix, the context project is returned
// unchanged.
func parsePullRequestProject(context *github.Project, s string) (p *github.Project, ref string) {
	p = context
	ref = s

	idx := strings.IndexByte(s, ':')
	if idx < 0 {
		return
	}

	owner := s[:idx]
	ref = s[idx+1:]

	// Reuse the context repo name unless the prefix already carries
	// an explicit "OWNER/REPO" form.
	var name string
	if !strings.Contains(owner, "/") {
		name = context.Name
	}
	p = github.NewProject(owner, name, context.Host)
	return
}
// parsePullRequestIssueNumber extracts the numeric issue id from a
// GitHub issue URL; it returns "" when the URL is not recognizable as
// an issue link.
func parsePullRequestIssueNumber(url string) string {
	parsed, err := github.ParseURL(url)
	if err != nil {
		return ""
	}

	issuePath := regexp.MustCompile(`^issues\/(\d+)`)
	if m := issuePath.FindStringSubmatch(parsed.ProjectPath()); m != nil {
		return m[1]
	}
	return ""
}
// commaSeparated flattens a list of comma-separated values into one
// list, skipping entries that are entirely empty. Empty pieces that
// result from splitting (e.g. "a,,b") are preserved.
func commaSeparated(l []string) []string {
	res := []string{}
	for _, entry := range l {
		if entry == "" {
			continue
		}
		for _, piece := range strings.Split(entry, ",") {
			res = append(res, piece)
		}
	}
	return res
}
| [
"\"HUB_RETRY_TIMEOUT\""
]
| []
| [
"HUB_RETRY_TIMEOUT"
]
| [] | ["HUB_RETRY_TIMEOUT"] | go | 1 | 0 | |
examples/mnist/train_mnist_arcloss.py | # MIT License
#
# Copyright (c) 2018 Haoxintong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""""""
import os
import time
import mxnet as mx
import numpy as np
from gluonfr.loss import ArcLoss
from mxnet.gluon.data.vision import MNIST
from mxnet import nd, gluon, metric as mtc, autograd as ag
from examples.mnist.net.lenet import LeNetPlus
from examples.mnist.utils import transform_train, transform_val, plot_result
# Download gluon models/datasets from the China-region S3 mirror, and
# disable GPU peer-to-peer transfers (presumably to work around
# multi-GPU transfer issues on this setup -- TODO confirm).
os.environ['MXNET_GLUON_REPO'] = 'https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn/'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
def validate(net, val_data, ctx, loss, plot=False):
    """Run one evaluation pass over ``val_data``.

    Returns ``(accuracy, mean_loss, embeddings, labels)``; the embedding
    and label arrays are only populated when ``plot`` is True.
    """
    metric = mtc.Accuracy()
    val_loss = 0
    ebs = []
    lbs = []
    for i, batch in enumerate(val_data):
        # Shard the batch across all devices in ctx.
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)

        # The network returns a pair per shard; by construction here,
        # element 0 is the embedding and element 1 the classifier output.
        ots = [net(X) for X in data]
        embedds = [ot[0] for ot in ots]
        outputs = [ot[1] for ot in ots]

        losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]
        metric.update(labels, outputs)
        # Average the per-device mean losses for this batch.
        val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
        if plot:
            # Collect per-sample embeddings/labels for visualization.
            for es, ls in zip(embedds, labels):
                assert len(es) == len(ls)
                for idx in range(len(es)):
                    ebs.append(es[idx].asnumpy())
                    lbs.append(ls[idx].asscalar())
    if plot:
        ebs = np.vstack(ebs)
        lbs = np.hstack(lbs)
    _, val_acc = metric.get()
    return val_acc, val_loss / len(val_data), ebs, lbs
def train():
    """Train LeNetPlus on MNIST with ArcLoss on two GPUs, printing
    train/validation accuracy and loss after every epoch."""
    epochs = 100
    lr = 0.1
    # Learning rate is divided by 10 at these epochs (np.inf = never again).
    lr_steps = [40, 70, np.inf]
    momentum = 0.9
    wd = 5e-4
    plot_period = 5

    ctx = [mx.gpu(i) for i in range(2)]
    batch_size = 256

    # ArcLoss hyper-parameters: scale s and margin m.
    margin_s = 5
    margin_m = 0.2

    train_set = MNIST(train=True, transform=transform_train)
    train_data = gluon.data.DataLoader(train_set, batch_size, True, num_workers=4, last_batch='discard')
    val_set = MNIST(train=False, transform=transform_val)
    val_data = gluon.data.DataLoader(val_set, batch_size, shuffle=False, num_workers=4)

    net = LeNetPlus(embedding_size=64, feature_norm=True, weight_norm=True)
    net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)
    # net.load_parameters("./pretrained_mnist.params", ctx=ctx)
    net.hybridize()

    loss = ArcLoss(s=margin_s, m=margin_m, classes=10)

    train_params = net.collect_params()
    trainer = gluon.Trainer(train_params, 'sgd', {'learning_rate': lr, 'momentum': momentum, 'wd': wd})

    lr_counter = 0

    metric = mtc.Accuracy()
    num_batch = len(train_data)

    for epoch in range(epochs+1):
        if epoch == lr_steps[lr_counter]:
            trainer.set_learning_rate(trainer.learning_rate * 0.1)
            lr_counter += 1
        # Plotting is currently disabled; re-enable the block below to
        # render embeddings every `plot_period` epochs.
        # if (epoch % plot_period) == 0:
        #     plot = True
        # else:
        plot = False

        train_loss = 0
        metric.reset()
        tic = time.time()
        ebs = []
        lbs = []

        for batch in train_data:
            # Shard the batch across both GPUs.
            data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
            labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)

            with ag.record():
                # Forward pass returns (embedding, logits) per device.
                ots = [net(X) for X in data]
                embedds = [ot[0] for ot in ots]
                outputs = [ot[1] for ot in ots]
                losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]

            for l in losses:
                ag.backward(l)

            if plot:
                # Collect per-sample embeddings/labels for visualization.
                for es, ls in zip(embedds, labels):
                    assert len(es) == len(ls)
                    for idx in range(len(es)):
                        ebs.append(es[idx].asnumpy())
                        lbs.append(ls[idx].asscalar())

            trainer.step(batch_size)
            metric.update(labels, outputs)
            train_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)

        _, train_acc = metric.get()
        train_loss /= num_batch

        val_acc, val_loss, val_ebs, val_lbs = validate(net, val_data, ctx, loss, plot)

        if plot:
            ebs = np.vstack(ebs)
            lbs = np.hstack(lbs)
            plot_result(ebs, lbs, os.path.join("../../resources", "arcloss-train-epoch{}.png".format(epoch)))
            plot_result(val_ebs, val_lbs, os.path.join("../../resources", "arcloss-val-epoch{}.png".format(epoch)))

        toc = time.time()
        print('[epoch % 3d] train accuracy: %.6f, train loss: %.6f | '
              'val accuracy: %.6f, val loss: %.6f, time: %.6f'
              % (epoch, train_acc, train_loss, val_acc, val_loss, toc - tic))

        # if epoch == 10:
        #     net.save_parameters("./pretrained_mnist.params")
        # net.save_parameters("./models/attention%d-cifar10-epoch-%d.params" % (args.num_layers, epoch))
if __name__ == '__main__':
train() | []
| []
| [
"MXNET_ENABLE_GPU_P2P",
"MXNET_GLUON_REPO"
]
| [] | ["MXNET_ENABLE_GPU_P2P", "MXNET_GLUON_REPO"] | python | 2 | 0 | |
aiohttp/helpers.py | """Various helper functions"""
import asyncio
import base64
import binascii
import cgi
import datetime
import functools
import inspect
import netrc
import os
import re
import sys
import time
import weakref
from collections import namedtuple
from contextlib import suppress
from math import ceil
from pathlib import Path
from urllib.parse import quote
from urllib.request import getproxies
import async_timeout
import attr
from multidict import MultiDict
from yarl import URL
from . import hdrs
from .abc import AbstractAccessLogger
from .log import client_logger
__all__ = ('BasicAuth',)
PY_36 = sys.version_info >= (3, 6)
PY_37 = sys.version_info >= (3, 7)

if not PY_37:
    # idna_ssl patches ssl.match_hostname on Python < 3.7 (where the
    # stdlib lacks the needed IDNA handling).
    import idna_ssl
    idna_ssl.patch_match_hostname()

# Unique marker object distinguishing "argument not given" from None.
sentinel = object()
# Truthy when the AIOHTTP_NO_EXTENSIONS env var is set (disables the C
# accelerator extensions).
NO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS'))

# N.B. sys.flags.dev_mode is available on Python 3.7+, use getattr
# for compatibility with older versions
DEBUG = (getattr(sys.flags, 'dev_mode', False) or
         (not sys.flags.ignore_environment and
          bool(os.environ.get('PYTHONASYNCIODEBUG'))))

# Character classes from the HTTP/1.1 grammar: TOKEN is every ASCII
# character except the controls and separators.
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
              '?', '=', '{', '}', ' ', chr(9)}
TOKEN = CHAR ^ CTL ^ SEPARATORS

coroutines = asyncio.coroutines
old_debug = coroutines._DEBUG
# prevent "coroutine noop was never awaited" warning.
coroutines._DEBUG = False


@asyncio.coroutine
def noop(*args, **kwargs):
    """Coroutine that accepts anything and does nothing."""
    return


coroutines._DEBUG = old_debug
class BasicAuth(namedtuple('BasicAuth', ['login', 'password', 'encoding'])):
    """Http basic authentication helper."""

    def __new__(cls, login, password='', encoding='latin1'):
        # Validate the credentials before building the immutable tuple.
        if login is None:
            raise ValueError('None is not allowed as login value')
        if password is None:
            raise ValueError('None is not allowed as password value')
        if ':' in login:
            raise ValueError(
                'A ":" is not allowed in login (RFC 1945#section-11.1)')
        return super().__new__(cls, login, password, encoding)

    @classmethod
    def decode(cls, auth_header, encoding='latin1'):
        """Create a BasicAuth object from an Authorization HTTP header."""
        parts = auth_header.strip().split(' ')
        if len(parts) != 2:
            raise ValueError('Could not parse authorization header.')
        scheme, to_decode = parts
        if scheme.strip().lower() != 'basic':
            raise ValueError('Unknown authorization method %s' % scheme)

        try:
            decoded = base64.b64decode(
                to_decode.encode('ascii')).decode(encoding)
        except binascii.Error:
            raise ValueError('Invalid base64 encoding.')

        # Split on the first ':' only -- the password may contain colons.
        username, _, password = decoded.partition(':')
        return cls(username, password, encoding=encoding)

    @classmethod
    def from_url(cls, url, *, encoding='latin1'):
        """Create BasicAuth from url."""
        if not isinstance(url, URL):
            raise TypeError("url should be yarl.URL instance")
        if url.user is None:
            return None
        return cls(url.user, url.password or '', encoding=encoding)

    def encode(self):
        """Encode credentials."""
        raw = '%s:%s' % (self.login, self.password)
        token = base64.b64encode(raw.encode(self.encoding))
        return 'Basic %s' % token.decode(self.encoding)
def strip_auth_from_url(url):
    """Split a URL into (url-without-credentials, BasicAuth-or-None)."""
    auth = BasicAuth.from_url(url)
    if auth is not None:
        return url.with_user(None), auth
    return url, None
def netrc_from_env():
    """Load credentials from a netrc file, or return None.

    The file named by the ``NETRC`` environment variable is tried first;
    otherwise ``~/.netrc`` is used (``~/_netrc`` on Windows).  Problems
    (missing or unparseable file, unresolvable home directory) are
    logged as warnings rather than raised.
    """
    netrc_obj = None
    netrc_path = os.environ.get('NETRC')
    try:
        if netrc_path is not None:
            netrc_path = Path(netrc_path)
        else:
            home_dir = Path.home()
            # Windows convention names the file "_netrc".
            if os.name == 'nt':  # pragma: no cover
                netrc_path = home_dir.joinpath('_netrc')
            else:
                netrc_path = home_dir.joinpath('.netrc')

        if netrc_path and netrc_path.is_file():
            try:
                netrc_obj = netrc.netrc(str(netrc_path))
            except (netrc.NetrcParseError, OSError) as e:
                # Fixed wording (was ".netrc file parses fail").
                client_logger.warning("failed to parse .netrc file: %s", e)

        if netrc_obj is None:
            # Fixed typo (was "could't").
            client_logger.warning("couldn't find .netrc file")
    except RuntimeError as e:  # pragma: no cover
        """ handle error raised by pathlib """
        # Path.home() raises RuntimeError when the home directory
        # cannot be resolved.
        client_logger.warning("couldn't find .netrc file: %s", e)
    return netrc_obj
@attr.s(frozen=True, slots=True)
class ProxyInfo:
    """Immutable pair of a proxy URL and optional BasicAuth credentials."""
    proxy = attr.ib(type=str)
    proxy_auth = attr.ib(type=BasicAuth)
def proxies_from_env():
    """Collect proxy settings from the standard environment variables.

    Returns a mapping of scheme ('http'/'https' as reported by
    ``urllib.request.getproxies``) to ProxyInfo.  HTTPS proxies are not
    supported and are skipped with a warning; credentials missing from
    the proxy URL are looked up in the netrc file.
    """
    proxy_urls = {k: URL(v) for k, v in getproxies().items()
                  if k in ('http', 'https')}
    netrc_obj = netrc_from_env()
    stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
    ret = {}
    for proto, val in stripped.items():
        proxy, auth = val
        if proxy.scheme == 'https':
            client_logger.warning(
                "HTTPS proxies %s are not supported, ignoring", proxy)
            continue
        if netrc_obj and auth is None:
            auth_from_netrc = netrc_obj.authenticators(proxy.host)
            if auth_from_netrc is not None:
                # auth_from_netrc is a (`user`, `account`, `password`) tuple,
                # `user` and `account` both can be username,
                # if `user` is None, use `account`
                *logins, password = auth_from_netrc
                auth = BasicAuth(logins[0] if logins[0] else logins[-1],
                                 password)
        ret[proto] = ProxyInfo(proxy, auth)
    return ret
def current_task(loop=None):
    """Return the task currently running on *loop* (default event loop
    when loop is None)."""
    if loop is None:
        loop = asyncio.get_event_loop()

    getter = asyncio.current_task if PY_37 else asyncio.Task.current_task
    task = getter(loop=loop)
    if task is None and hasattr(loop, 'current_task'):
        # this should be removed, tokio must use register_task and family API
        task = loop.current_task()
    return task
def isasyncgenfunction(obj):
    """Return True when obj is an async generator function.

    Always False on Pythons whose inspect module lacks the check.
    """
    checker = getattr(inspect, 'isasyncgenfunction', None)
    if checker is None:
        return False
    return checker(obj)
@attr.s(frozen=True, slots=True)
class MimeType:
    """Structured result of parse_mimetype().

    For 'text/html; charset=utf-8': type='text', subtype='html',
    suffix='' and parameters={'charset': 'utf-8'}.
    """
    type = attr.ib(type=str)
    subtype = attr.ib(type=str)
    suffix = attr.ib(type=str)
    parameters = attr.ib(type=MultiDict)
def parse_mimetype(mimetype):
    """Parses a MIME type into its components.

    mimetype is a MIME type string.

    Returns a MimeType object.

    Example:

    >>> parse_mimetype('text/html; charset=utf-8')
    MimeType(type='text', subtype='html', suffix='',
             parameters={'charset': 'utf-8'})

    """
    if not mimetype:
        return MimeType(type='', subtype='', suffix='', parameters={})

    head, *raw_params = mimetype.split(';')

    # Collect ";key=value" parameters; bare keys get an empty value.
    pairs = []
    for chunk in raw_params:
        if not chunk:
            continue
        if '=' in chunk:
            name, _, val = chunk.partition('=')
        else:
            name, val = chunk, ''
        pairs.append((name.lower().strip(), val.strip(' "')))
    parameters = MultiDict(pairs)

    fulltype = head.strip().lower()
    if fulltype == '*':
        fulltype = '*/*'

    if '/' in fulltype:
        mtype, _, stype = fulltype.partition('/')
    else:
        mtype, stype = fulltype, ''

    if '+' in stype:
        stype, _, suffix = stype.partition('+')
    else:
        suffix = ''

    return MimeType(type=mtype, subtype=stype,
                    suffix=suffix, parameters=parameters)
def guess_filename(obj, default=None):
    """Best-effort filename taken from obj.name.

    Returns *default* when obj has no usable string name or the name is
    a pseudo-name like '<stdin>'.
    """
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, str):
        return default
    if name.startswith('<') or name.endswith('>'):
        return default
    return Path(name).name
def content_disposition_header(disptype, quote_fields=True, **params):
    """Sets ``Content-Disposition`` header.

    disptype is a disposition type: inline, attachment, form-data.
    Should be valid extension token (see RFC 2183)

    params is a dict with disposition params.
    """
    # The disposition type must be a non-empty HTTP token.
    if not disptype or not (TOKEN > set(disptype)):
        raise ValueError('bad content disposition type {!r}'
                         ''.format(disptype))

    value = disptype
    if params:
        lparams = []
        for key, val in params.items():
            if not key or not (TOKEN > set(key)):
                raise ValueError('bad content disposition parameter'
                                 ' {!r}={!r}'.format(key, val))
            qval = quote(val, '') if quote_fields else val
            lparams.append((key, '"%s"' % qval))
            if key == 'filename':
                # Additionally emit the extended "filename*" form for
                # non-ASCII names (RFC 5987 style).
                lparams.append(('filename*', "utf-8''" + qval))
        sparams = '; '.join('='.join(pair) for pair in lparams)
        value = '; '.join((value, sparams))
    return value
class AccessLogger(AbstractAccessLogger):
    """Helper object to log access.

    Usage:
        log = logging.getLogger("spam")
        log_format = "%a %{User-Agent}i"
        access_logger = AccessLogger(log, log_format)
        access_logger.log(request, response, time)

    Format:
        %%  The percent sign
        %a  Remote IP-address (IP-address of proxy if using reverse proxy)
        %t  Time when the request was started to process
        %P  The process ID of the child that serviced the request
        %r  First line of request
        %s  Response status code
        %b  Size of response in bytes, including HTTP headers
        %T  Time taken to serve the request, in seconds
        %Tf Time taken to serve the request, in seconds with floating fraction
            in .06f format
        %D  Time taken to serve the request, in microseconds
        %{FOO}i  request.headers['FOO']
        %{FOO}o  response.headers['FOO']
        %{FOO}e  os.environ['FOO']
    """

    # Maps a format atom to the descriptive key used in the log
    # record's `extra` dict.
    LOG_FORMAT_MAP = {
        'a': 'remote_address',
        't': 'request_start_time',
        'P': 'process_id',
        'r': 'first_request_line',
        's': 'response_status',
        'b': 'response_size',
        'T': 'request_time',
        'Tf': 'request_time_frac',
        'D': 'request_time_micro',
        'i': 'request_header',
        'o': 'response_header',
    }

    LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"'
    FORMAT_RE = re.compile(r'%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)')
    CLEANUP_RE = re.compile(r'(%[^s])')
    # Cache of compiled formats, shared by all instances.
    _FORMAT_CACHE = {}

    KeyMethod = namedtuple('KeyMethod', 'key method')

    def __init__(self, logger, log_format=LOG_FORMAT):
        """Initialise the logger.

        logger is a logger object to be used for logging.
        log_format is an string with apache compatible log format description.
        """
        super().__init__(logger, log_format=log_format)

        _compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
        if not _compiled_format:
            _compiled_format = self.compile_format(log_format)
            AccessLogger._FORMAT_CACHE[log_format] = _compiled_format

        self._log_format, self._methods = _compiled_format

    def compile_format(self, log_format):
        """Translate log_format into form usable by modulo formatting

        All known atoms will be replaced with %s
        Also methods for formatting of those atoms will be added to
        _methods in appropriate order

        For example we have log_format = "%a %t"
        This format will be translated to "%s %s"
        Also contents of _methods will be
        [self._format_a, self._format_t]
        These method will be called and results will be passed
        to translated string format.

        Each _format_* method receive 'args' which is list of arguments
        given to self.log

        Exceptions are _format_e, _format_i and _format_o methods which
        also receive key name (by functools.partial)
        """
        # list of (key, method) tuples, we don't use an OrderedDict as users
        # can repeat the same key more than once
        methods = list()

        for atom in self.FORMAT_RE.findall(log_format):
            if atom[1] == '':
                # Simple atom such as %a: look up its formatter directly.
                format_key = self.LOG_FORMAT_MAP[atom[0]]
                m = getattr(AccessLogger, '_format_%s' % atom[0])
            else:
                # Keyed atom such as %{User-Agent}i: bind the key name
                # into the formatter with functools.partial.
                format_key = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
                m = getattr(AccessLogger, '_format_%s' % atom[2])
                m = functools.partial(m, atom[1])
            methods.append(self.KeyMethod(format_key, m))

        log_format = self.FORMAT_RE.sub(r'%s', log_format)
        log_format = self.CLEANUP_RE.sub(r'%\1', log_format)
        return log_format, methods

    @staticmethod
    def _format_i(key, request, response, time):
        if request is None:
            return '(no headers)'

        # suboptimal, make istr(key) once
        return request.headers.get(key, '-')

    @staticmethod
    def _format_o(key, request, response, time):
        # suboptimal, make istr(key) once
        return response.headers.get(key, '-')

    @staticmethod
    def _format_a(request, response, time):
        if request is None:
            return '-'
        ip = request.remote
        return ip if ip is not None else '-'

    @staticmethod
    def _format_t(request, response, time):
        # `time` is the elapsed duration; reconstruct the start instant.
        now = datetime.datetime.utcnow()
        start_time = now - datetime.timedelta(seconds=time)
        return start_time.strftime('[%d/%b/%Y:%H:%M:%S +0000]')

    @staticmethod
    def _format_P(request, response, time):
        return "<%s>" % os.getpid()

    @staticmethod
    def _format_r(request, response, time):
        if request is None:
            return '-'
        return '%s %s HTTP/%s.%s' % tuple((request.method,
                                           request.path_qs) + request.version)

    @staticmethod
    def _format_s(request, response, time):
        return response.status

    @staticmethod
    def _format_b(request, response, time):
        return response.body_length

    @staticmethod
    def _format_T(request, response, time):
        return round(time)

    @staticmethod
    def _format_Tf(request, response, time):
        return '%06f' % time

    @staticmethod
    def _format_D(request, response, time):
        return round(time * 1000000)

    def _format_line(self, request, response, time):
        # Lazily yields (key, formatted value) pairs in format order.
        return ((key, method(request, response, time))
                for key, method in self._methods)

    def log(self, request, response, time):
        try:
            fmt_info = self._format_line(request, response, time)

            values = list()
            extra = dict()
            for key, value in fmt_info:
                values.append(value)

                if key.__class__ is str:
                    extra[key] = value
                else:
                    # Keyed atoms are nested: extra[kind][name] = value.
                    k1, k2 = key
                    dct = extra.get(k1, {})
                    dct[k2] = value
                    extra[k1] = dct

            self.logger.info(self._log_format % tuple(values), extra=extra)
        except Exception:
            # Logging must never break request handling.
            self.logger.exception("Error in logging")
class reify:
    """Use as a class method decorator.  It operates almost exactly like
    the Python `@property` decorator, but it puts the result of the
    method it decorates into the instance dict after the first call,
    effectively replacing the function it decorates with an instance
    variable.  It is, in Python parlance, a data descriptor.

    NOTE(review): instances of the host class are expected to provide a
    ``_cache`` dict attribute where computed values are stored.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except Exception:  # pragma: no cover
            self.__doc__ = ""
        self.name = wrapped.__name__

    def __get__(self, inst, owner, _sentinel=sentinel):
        try:
            try:
                # Fast path: value already computed and cached.
                return inst._cache[self.name]
            except KeyError:
                val = self.wrapped(inst)
                inst._cache[self.name] = val
                return val
        except AttributeError:
            # Class-level access (inst is None) returns the descriptor
            # itself, matching @property behavior.
            if inst is None:
                return self
            raise

    def __set__(self, inst, value):
        raise AttributeError("reified property is read-only")
# Anchored patterns matching a full dotted-quad IPv4 address or a full
# (optionally compressed) IPv6 address.  Compiled once for both str and
# bytes inputs.
_ipv4_pattern = (r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
                 r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
_ipv6_pattern = (
    r'^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}'
    r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)'
    r'((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})'
    r'(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}'
    r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}'
    r'[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)'
    r'(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}'
    r':|:(:[A-F0-9]{1,4}){7})$')
_ipv4_regex = re.compile(_ipv4_pattern)
_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE)
_ipv4_regexb = re.compile(_ipv4_pattern.encode('ascii'))
_ipv6_regexb = re.compile(_ipv6_pattern.encode('ascii'), flags=re.IGNORECASE)


def is_ip_address(host):
    """Return True when host is a literal IPv4/IPv6 address.

    Accepts str or bytes-like input; None yields False, anything else
    raises TypeError.
    """
    if host is None:
        return False
    if isinstance(host, str):
        return bool(_ipv4_regex.match(host) or _ipv6_regex.match(host))
    if isinstance(host, (bytes, bytearray, memoryview)):
        return bool(_ipv4_regexb.match(host) or _ipv6_regexb.match(host))
    raise TypeError("{} [{}] is not a str or bytes"
                    .format(host, type(host)))
# One-second cache so repeated calls within the same second reuse the
# already-formatted string.
_cached_current_datetime = None
_cached_formatted_datetime = None


def rfc822_formatted_time():
    """Return the current UTC time as an RFC 822 style HTTP date string,
    recomputed at most once per second."""
    global _cached_current_datetime
    global _cached_formatted_datetime

    now = int(time.time())
    if now != _cached_current_datetime:
        # Weekday and month names for HTTP date/time formatting;
        # always English!
        weekdays = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
        months = ("",  # placeholder so month numbers stay 1-based
                  "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
        tm = time.gmtime(now)
        _cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
            weekdays[tm.tm_wday], tm.tm_mday, months[tm.tm_mon],
            tm.tm_year, tm.tm_hour, tm.tm_min, tm.tm_sec)
        _cached_current_datetime = now
    return _cached_formatted_datetime
def _weakref_handle(info):
ref, name = info
ob = ref()
if ob is not None:
with suppress(Exception):
getattr(ob, name)()
def weakref_handle(ob, name, timeout, loop, ceil_timeout=True):
    """Schedule ob.<name>() after *timeout* seconds, referencing ob only
    weakly so the schedule does not keep it alive.

    Returns the timer handle, or None when timeout is unset or
    non-positive.  With ceil_timeout the deadline is rounded up to a
    whole second.
    """
    if timeout is None or timeout <= 0:
        return None
    when = loop.time() + timeout
    if ceil_timeout:
        when = ceil(when)
    return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name))
def call_later(cb, timeout, loop):
    """Schedule cb on loop after *timeout* seconds (deadline ceiled to a
    whole second); returns None when timeout is unset or non-positive."""
    if timeout is None or timeout <= 0:
        return None
    return loop.call_at(ceil(loop.time() + timeout), cb)
class TimeoutHandle:
    """ Timeout handle """

    def __init__(self, loop, timeout):
        self._timeout = timeout
        self._loop = loop
        # Callbacks fired (best effort) when the timeout expires.
        self._callbacks = []

    def register(self, callback, *args, **kwargs):
        self._callbacks.append((callback, args, kwargs))

    def close(self):
        # Drop all callbacks so a later fire becomes a no-op.
        self._callbacks.clear()

    def start(self):
        # Schedule firing at a ceiled absolute loop time.  Returns the
        # TimerHandle, or None when no positive timeout is configured.
        if self._timeout is not None and self._timeout > 0:
            at = ceil(self._loop.time() + self._timeout)
            return self._loop.call_at(at, self.__call__)

    def timer(self):
        # Hand out a context manager tied to this timeout: a real
        # TimerContext when a timeout is set, a no-op otherwise.
        if self._timeout is not None and self._timeout > 0:
            timer = TimerContext(self._loop)
            self.register(timer.timeout)
        else:
            timer = TimerNoop()
        return timer

    def __call__(self):
        # Fire: run every registered callback, ignoring their errors.
        for cb, args, kwargs in self._callbacks:
            with suppress(Exception):
                cb(*args, **kwargs)

        self._callbacks.clear()
class TimerNoop:
    """Context manager that does nothing; stands in for TimerContext
    when no timeout is configured."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Never suppress exceptions.
        return False
class TimerContext:
    """ Low resolution timeout context manager """

    def __init__(self, loop):
        self._loop = loop
        # Stack of tasks currently inside this context (supports
        # re-entrant use).
        self._tasks = []
        self._cancelled = False

    def __enter__(self):
        task = current_task(loop=self._loop)

        if task is None:
            raise RuntimeError('Timeout context manager should be used '
                               'inside a task')

        if self._cancelled:
            # Timeout already fired before entry: cancel immediately.
            task.cancel()
            raise asyncio.TimeoutError from None

        self._tasks.append(task)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._tasks:
            self._tasks.pop()

        if exc_type is asyncio.CancelledError and self._cancelled:
            # Translate a cancellation we caused into TimeoutError.
            raise asyncio.TimeoutError from None

    def timeout(self):
        # Called when the deadline passes: cancel all tasks inside the
        # context exactly once.
        if not self._cancelled:
            for task in set(self._tasks):
                task.cancel()

            self._cancelled = True
class CeilTimeout(async_timeout.timeout):
    """async_timeout.timeout variant whose deadline is ceiled to a whole
    second of loop time."""

    def __enter__(self):
        if self._timeout is not None:
            self._task = current_task(loop=self._loop)
            if self._task is None:
                raise RuntimeError(
                    'Timeout context manager should be used inside a task')
            # Ceil the absolute deadline before scheduling cancellation.
            self._cancel_handler = self._loop.call_at(
                ceil(self._loop.time() + self._timeout), self._cancel_task)
        return self
class HeadersMixin:
    """Mixin adding parsed Content-Type / charset / Content-Length
    properties on top of a ``_headers`` mapping supplied by the host
    class."""

    ATTRS = frozenset([
        '_content_type', '_content_dict', '_stored_content_type'])

    _content_type = None
    _content_dict = None
    # Raw header value the cached parse belongs to; `sentinel` means
    # "never parsed yet".
    _stored_content_type = sentinel

    def _parse_content_type(self, raw):
        # Remember the raw value so repeated property reads can skip
        # re-parsing while the header is unchanged.
        self._stored_content_type = raw
        if raw is None:
            # default value according to RFC 2616
            self._content_type = 'application/octet-stream'
            self._content_dict = {}
        else:
            self._content_type, self._content_dict = cgi.parse_header(raw)

    @property
    def content_type(self, *, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of content part for Content-Type HTTP header."""
        raw = self._headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_type

    @property
    def charset(self, *, _CONTENT_TYPE=hdrs.CONTENT_TYPE):
        """The value of charset part for Content-Type HTTP header."""
        raw = self._headers.get(_CONTENT_TYPE)
        if self._stored_content_type != raw:
            self._parse_content_type(raw)
        return self._content_dict.get('charset')

    @property
    def content_length(self, *, _CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
        """The value of Content-Length HTTP header."""
        content_length = self._headers.get(_CONTENT_LENGTH)

        if content_length:
            return int(content_length)
def set_result(fut, result):
    """Set *result* on *fut* unless the future is already completed."""
    if fut.done():
        return
    fut.set_result(result)
def set_exception(fut, exc):
    """Set *exc* as the result of *fut* unless it is already completed."""
    if fut.done():
        return
    fut.set_exception(exc)
| []
| []
| [
"FOO",
"PYTHONASYNCIODEBUG",
"AIOHTTP_NO_EXTENSIONS",
"NETRC"
]
| [] | ["FOO", "PYTHONASYNCIODEBUG", "AIOHTTP_NO_EXTENSIONS", "NETRC"] | python | 4 | 0 | |
bear_export_sync.py | # encoding=utf-8
# python3.6
# bear_export_sync.py
# Developed with Visual Studio Code with MS Python Extension.
'''
# Markdown export from Bear sqlite database
Version 1.3.13, 2018-03-06 at 15:32 EST
github/rovest, rorves@twitter
See also: bear_import.py for auto import to bear script.
## Sync external updates:
First checks for changes in external Markdown files (previously exported from Bear)
* Replacing text in original note with callback-url `/add-text&mode=replace_all` command
(Keeping original creation date)
Now using the new mode: `replace_all` to include title.
* New notes are added to Bear (with x-callback-url command)
* New notes get tags from sub folder names, or `#.inbox` if root
* Backing up original note as file to BearSyncBackup folder
(unless a sync conflict, then both notes will be there)
## Export:
Then exporting Markdown from Bear sqlite db.
* check_if_modified() on database.sqlite to see if export is needed
* Uses rsync for copying, so only markdown files of changed sheets will be updated
and synced by Dropbox (or other sync services)
* "Hides" tags with `period+space` on beginning of line: `. #tag` not appear as H1 in other apps.
(This is removed if sync-back above)
* Or instead hide tags in HTML comment blocks like: `<!-- #mytag -->` if `hide_tags_in_comment_block = True`
* Makes subfolders named with first tag in note if `make_tag_folders = True`
* Files can now be copied to multiple tag-folders if `multi_tags = True`
* Export can now be restricted to a list of specific tags: `limit_export_to_tags = ['bear/github', 'writings']`
or leave list empty for all notes: `limit_export_to_tags = []`
* Can export and link to images in common image repository
* Or export as textbundles with images included
'''
import argparse
def str2bool(v):
    """Parse a human-friendly boolean string for argparse.

    Accepts yes/no, true/false, t/f, y/n, 1/0 (case-insensitive); raises
    argparse.ArgumentTypeError for anything else.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line interface. Every option mirrors one of the module-level
# configuration globals assigned right after parse_args() below.
parser = argparse.ArgumentParser(description='Markdown export from Bear sqlite database.')
parser.add_argument("--sync", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True) Sync external updates back into Bear")
parser.add_argument("--make_tag_folders", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True) Exports to folders using first tag only, if `multi_tag_folders = False`")
parser.add_argument("--multi_tag_folders", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True) Copies notes to all 'tag-paths' found in note! Only active if `make_tag_folders = True`")
parser.add_argument("--raw", type=str2bool, nargs='?', const=False,
                    default=False, help=("(Default: False) Exports without any modification to the note contents, "
                                         + "just like Bear does. This implies not hiding tags, not adding BearID. "
                                         + "Note: This effectively disables later syncing of modified contents."))
parser.add_argument("--hide_tags_in_comment_block", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True) Hide tags in HTML comments: `<!-- #mytag -->`")
# The following two lists are more or less mutually exclusive, so use only one of them.
# (You can use both if you have some nested tags where that makes sense)
parser.add_argument("--only_export_these_tags", nargs='*',
                    default=[], help="(Default: '') Example: \"--only_export_these_tags 'bear/github' 'writings'\". Leave this list empty for all notes! Works only if `make_tag_folders = True`")
parser.add_argument("--no_export_tags", nargs='*',
                    default=[], help="(Default: '') Example: \"--no_export_tags 'private' '.inbox' 'love letters' 'banking'\". If a tag in note matches one in this list, it will not be exported.")
parser.add_argument("--export_as_textbundles", type=str2bool, nargs='?', const=True,
                    default=False, help="(Default: False) Exports as Textbundles with images included")
parser.add_argument("--export_as_hybrids", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True) Exports as .textbundle only if images included, otherwise as .md. Only used if `export_as_textbundles = True`")
parser.add_argument("--export_image_repository", type=str2bool, nargs='?', const=True,
                    default=False, help="(Default: False) Export all notes as md but link images to a common repository exported to: `assets_path`. Only used if `export_as_textbundles = False`")
parser.add_argument("--my_sync_service", nargs='?', const=True,
                    default="Dropbox", help="(Default: Dropbox) Change 'Dropbox' to 'Box', 'Onedrive', or whatever folder of sync service you need.")
parser.add_argument("--set_logging_on", type=str2bool, nargs='?', const=True,
                    default=True, help="(Default: True)")
parser.add_argument("--base_export_path", nargs='?', const=True,
                    default=False, help="(Default: '~/{my_sync_service}') The folder in which 'BearNotes' and 'BearSyncBackup' exists or will be created on first run")
args = parser.parse_args()
# Materialize the parsed CLI options as module-level configuration globals;
# the rest of the module reads these, never `args` directly.
sync = args.sync
make_tag_folders = args.make_tag_folders
multi_tag_folders = args.multi_tag_folders
raw = args.raw
hide_tags_in_comment_block = args.hide_tags_in_comment_block
only_export_these_tags = args.only_export_these_tags
no_export_tags = args.no_export_tags
export_as_textbundles = args.export_as_textbundles
export_as_hybrids = args.export_as_hybrids
export_image_repository = args.export_image_repository
my_sync_service = args.my_sync_service
set_logging_on = args.set_logging_on
base_export_path = args.base_export_path
# NOTE! Your user 'HOME' path and '/BearNotes' is added below!
# NOTE! So do not change anything below here!!!
import sqlite3
import datetime
import re
import subprocess
import urllib.parse
import os
import time
import shutil
import fnmatch
import json
HOME = os.getenv('HOME', '')
if base_export_path is False:
    # No --base_export_path given (the argparse default is the bool False):
    # fall back to the sync-service folder in the user's home directory.
    base_export_path = os.path.join(HOME, my_sync_service)
# NOTE! if 'BearNotes' is left blank, all other files in my_sync_service will be deleted!!
export_path = os.path.join(base_export_path, 'BearNotes')
# NOTE! "export_path" is used for sync-back to Bear, so don't change this variable name!
multi_export = [(export_path, True)]  # only one folder output here.
# Use if you want export to several places like: Dropbox and OneDrive, etc. See below
# Sample for multi folder export:
# export_path_aux1 = os.path.join(HOME, 'OneDrive', 'BearNotes')
# export_path_aux2 = os.path.join(HOME, 'Box', 'BearNotes')
# NOTE! All files in export path not in Bear will be deleted if delete flag is "True"!
# Set this flag to False only for folders to keep old deleted versions of notes
# multi_export = [(export_path, True), (export_path_aux1, False), (export_path_aux2, True)]
temp_path = os.path.join(HOME, 'Temp', 'BearExportTemp')  # NOTE! Do not change the "BearExportTemp" folder name!!!
# Bear's sqlite database moved between app versions; try both known locations.
bear_db = os.path.join(HOME, 'Library/Containers/net.shinyfrog.bear/Data/Documents/Application Data/database.sqlite')
if not os.path.isfile(bear_db):
    bear_db = os.path.join(HOME, 'Library/Group Containers/9K33E3U3T4.net.shinyfrog.bear/Application Data/database.sqlite')
if not os.path.isfile(bear_db):
    raise RuntimeError("The Bear database was not found")
sync_backup = os.path.join(base_export_path, 'BearSyncBackup')  # Backup of original note before sync to Bear.
log_file = os.path.join(sync_backup, 'bear_export_sync_log.txt')
# Paths used in image exports:
bear_image_path = os.path.join(HOME,
    'Library/Containers/net.shinyfrog.bear/Data/Documents/Application Data/Local Files/Note Images')
assets_path = os.path.join(HOME, export_path, 'BearImages')
# Timestamp marker files used to detect external edits / needed exports.
sync_ts = '.sync-time.log'
export_ts = '.export-time.log'
sync_ts_file = os.path.join(export_path, sync_ts)
sync_ts_file_temp = os.path.join(temp_path, sync_ts)
export_ts_file_exp = os.path.join(export_path, export_ts)
export_ts_file = os.path.join(temp_path, export_ts)
# Helper script + scratch file used to read macOS Finder tags.
gettag_sh = os.path.join(HOME, 'temp/gettag.sh')
gettag_txt = os.path.join(HOME, 'temp/gettag.txt')
def main():
    """Entry point: sync external Markdown edits back into Bear first, then
    re-export all notes if the Bear database changed since the last export."""
    init_gettag_script()
    if sync:
        sync_md_updates()
    if check_db_modified():
        delete_old_temp_files()
        note_count = export_markdown()
        write_time_stamp()
        rsync_files_from_temp()
        if export_image_repository and not export_as_textbundles:
            copy_bear_images()
        # notify('Export completed')
        write_log(str(note_count) + ' notes exported to: ' + export_path)
        print(str(note_count) + ' notes exported to: ' + export_path)
    else:
        print('*** No notes needed exports')
def write_log(message):
    """Append a timestamped line to the sync log (no-op when logging is off).

    Creates the backup folder on first use; the export root prefix is
    stripped from the message to keep log lines short.
    """
    if set_logging_on:  # idiomatic truthiness test (was `== True`)
        if not os.path.exists(sync_backup):
            os.makedirs(sync_backup)
        time_stamp = datetime.datetime.now().strftime("%Y-%m-%d at %H:%M:%S")
        message = message.replace(export_path + '/', '')
        with open(log_file, 'a', encoding='utf-8') as f:
            f.write(time_stamp + ': ' + message + '\n')
def check_db_modified():
    """Return True when the Bear database changed since the last export.

    NOTE(review): the existence check uses sync_ts_file while the timestamp
    comparison uses export_ts_file_exp — presumably intentional (a missing
    sync stamp forces a full export on first run), but worth confirming.
    """
    if not os.path.exists(sync_ts_file):
        return True
    db_ts = get_file_date(bear_db)
    last_export_ts = get_file_date(export_ts_file_exp)
    return db_ts > last_export_ts
def export_markdown():
    """Export every non-trashed Bear note to `temp_path` as Markdown (or
    textbundle) and return the number of files written (a note copied to
    several tag folders counts once per copy)."""
    with sqlite3.connect(bear_db) as conn:
        conn.row_factory = sqlite3.Row
        query = "SELECT * FROM `ZSFNOTE` WHERE `ZTRASHED` LIKE '0'"
        c = conn.execute(query)
        note_count = 0
        for row in c:
            title = row['ZTITLE']
            md_text = row['ZTEXT'].rstrip()
            creation_date = row['ZCREATIONDATE']
            modified = row['ZMODIFICATIONDATE']
            uuid = row['ZUNIQUEIDENTIFIER']
            filename = clean_title(title) + date_time_conv(creation_date)
            file_list = []
            if make_tag_folders:
                # One target path per tag folder; empty list => skip note.
                file_list = sub_path_from_tag(temp_path, filename, md_text)
            else:
                file_list.append(os.path.join(temp_path, filename))
            if file_list:
                mod_dt = dt_conv(modified)
                if not raw:
                    md_text = hide_tags(md_text)
                    # Embed the note's UUID so external edits can be synced back.
                    md_text += '\n\n<!-- {BearID:' + uuid + '} -->\n'
                for filepath in set(file_list):
                    note_count += 1
                    # print(filepath)
                    if export_as_textbundles:
                        if check_image_hybrid(md_text):
                            make_text_bundle(md_text, filepath, mod_dt)
                        else:
                            write_file(filepath + '.md', md_text, mod_dt)
                    elif export_image_repository:
                        md_proc_text = process_image_links(md_text, filepath)
                        write_file(filepath + '.md', md_proc_text, mod_dt)
                    else:
                        write_file(filepath + '.md', md_text, mod_dt)
        return note_count
def check_image_hybrid(md_text):
    """Decide whether this note needs a textbundle export.

    In hybrid mode only notes containing Bear image links become bundles;
    otherwise every note does.
    """
    if not export_as_hybrids:
        return True
    return re.search(r'\[image:(.+?)\]', md_text) is not None
def make_text_bundle(md_text, filepath, mod_dt):
    '''
    Exports as Textbundles with images included
    '''
    bundle_path = filepath + '.textbundle'
    assets_path = os.path.join(bundle_path, 'assets')
    if not os.path.exists(bundle_path):
        os.makedirs(bundle_path)
        os.makedirs(assets_path)
    # Standard textbundle metadata (see textbundle.org).
    info = '''{
    "transient" : true,
    "type" : "net.daringfireball.markdown",
    "creatorIdentifier" : "net.shinyfrog.bear",
    "version" : 2
    }'''
    matches = re.findall(r'\[image:(.+?)\]', md_text)
    for match in matches:
        image_name = match
        # '/' cannot appear in an asset filename; flatten the Bear path.
        new_name = image_name.replace('/', '_')
        source = os.path.join(bear_image_path, image_name)
        target = os.path.join(assets_path, new_name[:255])
        shutil.copy2(source, target)
    # NOTE(review): the replacement below is empty, so image references are
    # stripped rather than rewritten to 'assets/...' links — looks like the
    # replacement template was lost; verify against upstream bear_export_sync.
    md_text = re.sub(r'\[image:(.+?)/(.+?)\]', r'', md_text)
    write_file(bundle_path + '/text.md', md_text, mod_dt)
    write_file(bundle_path + '/info.json', info, mod_dt)
    os.utime(bundle_path, (-1, mod_dt))
def sub_path_from_tag(temp_path, filename, md_text):
    """Return the list of export paths for a note, one per tag folder.

    Tags are parsed from md_text; an empty list means "do not export this
    note". Honors only_export_these_tags / no_export_tags and creates the
    tag sub-folders under temp_path as a side effect.
    """
    # Get tags in note:
    # pattern1: plain tags like '#tag' or '#nested/tag'
    pattern1 = r'(?<!\S)\#([^ \d][\w\/\-]+)[ \n]?(?!([\/ \w\-]+\w[#]))'
    # pattern2: multi-word tags wrapped in '#...#'
    pattern2 = r'(?<![\S])\#([^ \d][.\w\/ \-]+?)\#([ \n]|$)'
    if multi_tag_folders:
        # Files copied to all tag-folders found in note
        tags = []
        for matches in re.findall(pattern1, md_text):
            tag = matches[0]
            tags.append(tag)
        for matches2 in re.findall(pattern2, md_text):
            tag2 = matches2[0]
            tags.append(tag2)
        if len(tags) == 0:
            if only_export_these_tags:
                return []
            # No tags found, copy to root level only
            return [os.path.join(temp_path, filename)]
    else:
        # Only folder for first tag
        match1 = re.search(pattern1, md_text)
        match2 = re.search(pattern2, md_text)
        if match1 and match2:
            # Both kinds present: use whichever appears first in the note.
            if match1.start(1) < match2.start(1):
                tag = match1.group(1)
            else:
                tag = match2.group(1)
        elif match1:
            tag = match1.group(1)
        elif match2:
            tag = match2.group(1)
        elif only_export_these_tags:
            return []
        else:
            # No tags found, copy to root level only
            return [os.path.join(temp_path, filename)]
        tags = [tag]
    paths = []
    for tag in tags:
        if tag == '/':
            continue
        if only_export_these_tags:
            export = False
            for export_tag in only_export_these_tags:
                if tag.lower().startswith(export_tag.lower()):
                    export = True
                    break
            if not export:
                continue
        # Any blocked tag excludes the whole note, not just this folder.
        for no_tag in no_export_tags:
            if tag.lower().startswith(no_tag.lower()):
                return []
        if tag.startswith('.'):
            # Avoid hidden path if it starts with a '.'
            sub_path = '_' + tag[1:]
        else:
            sub_path = tag
        tag_path = os.path.join(temp_path, sub_path)
        if not os.path.exists(tag_path):
            os.makedirs(tag_path)
        paths.append(os.path.join(tag_path, filename))
    return paths
def process_image_links(md_text, filepath):
    '''
    Bear image links converted to MD links
    '''
    root = filepath.replace(temp_path, '')
    # Depth of the note below the export root, used to climb back up.
    level = len(root.split('/')) - 2
    parent = '../' * level
    # NOTE(review): `parent` is computed but unused, and the replacement
    # below is empty — this strips image links instead of rewriting them.
    # Looks like the replacement template was lost; verify against upstream
    # (expected something like '![](%sBearImages/\\1)' % parent).
    md_text = re.sub(r'\[image:(.+?)\]', r'', md_text)
    return md_text
def restore_image_links(md_text):
    '''
    MD image links restored back to Bear links
    '''
    if not re.search(r'!\[.*?\]\(assets/.+?\)', md_text):
        # No image links in note, return unchanged:
        return md_text
    if export_as_textbundles:
        # Textbundle assets are named '<bear-dir>_<name>'; rebuild the
        # original [image:dir/name] form (optional title/alt preserved).
        md_text = re.sub(r'!\[(.*?)\]\(assets/(.+?)_(.+?)( ".+?")?\) ?', r'[image:\2/\3]\4 \1', md_text)
    elif export_image_repository :
        # md_text = re.sub(r'\[image:(.+?)\]', r'', md_text)
        md_text = re.sub(r'!\[\]\((\.\./)*BearImages/(.+?)\)', r'[image:\2]', md_text)
    return md_text
def copy_bear_images():
    # Image files copied to a common image repository
    # (rsync only transfers changed files; --delete prunes removed images).
    subprocess.call(['rsync', '-r', '-t', '--delete',
                     bear_image_path + "/", assets_path])
def write_time_stamp():
    # write to time-stamp.txt file (used during sync)
    # Both stamps record "now": export_ts_file marks this export; the temp
    # sync stamp is rsynced out alongside the notes.
    write_file(export_ts_file, "Markdown from Bear written at: " +
               datetime.datetime.now().strftime("%Y-%m-%d at %H:%M:%S"), 0)
    write_file(sync_ts_file_temp, "Markdown from Bear written at: " +
               datetime.datetime.now().strftime("%Y-%m-%d at %H:%M:%S"), 0)
def hide_tags(md_text):
    """Prevent '#tag' lines from rendering as Markdown H1 headings in other
    apps, either by wrapping in an HTML comment or by prefixing '. '."""
    # Hide tags from being seen as H1, by placing `period+space` at start of line:
    if hide_tags_in_comment_block:
        md_text = re.sub(r'(\n)[ \t]*(\#[\w.].+)', r'\1<!-- \2 -->', md_text)
    else:
        md_text = re.sub(r'(\n)[ \t]*(\#[\w.]+)', r'\1. \2', md_text)
    return md_text
def restore_tags(md_text):
    """Undo hide_tags(): unwrap `<!-- #tag -->` comments and strip the
    leading 'period+space' prefix so tags are plain Bear tags again."""
    restored = re.sub(r'(\n)<!--[ \t]*(\#[\w.].+?) -->', r'\1\2', md_text)
    restored = re.sub(r'(\n)\.[ \t]*(\#[\w.]+)', r'\1\2', restored)
    return restored
def clean_title(title):
    """Sanitize a note title for use as a filename: truncate to 56 chars,
    replace unsafe characters with '-', drop a trailing '-', and fall back
    to 'Untitled' for empty titles."""
    cleaned = title[:56].strip()
    if not cleaned:
        cleaned = "Untitled"
    cleaned = re.sub(r'[/\\*?$@!^&\|~:]', r'-', cleaned)
    cleaned = re.sub(r'-$', r'', cleaned)
    return cleaned.strip()
def write_file(filename, file_content, modified):
    """Write file_content (UTF-8) to filename; when modified > 0 also set
    the file's mtime to that timestamp (atime left untouched via -1)."""
    with open(filename, "w", encoding='utf-8') as out:
        out.write(file_content)
    if modified > 0:
        os.utime(filename, (-1, modified))
def read_file(file_name):
    """Return the full contents of file_name decoded as UTF-8."""
    with open(file_name, "r", encoding='utf-8') as src:
        return src.read()
def get_file_date(filename):
    """Return the file's mtime, or 0 if it cannot be read.

    Catches only the exceptions a stat can raise (missing file, permission
    denied, bad argument type) instead of the original bare `except`.
    """
    try:
        return os.path.getmtime(filename)
    except (OSError, TypeError):
        return 0
def dt_conv(dtnum):
    """Convert a Bear (Core Data) timestamp to a Unix epoch timestamp.

    The constant offset (31 Julian years + 6 hours) was found empirically,
    per the original author's note.
    """
    seconds_per_hour = 3600
    seconds_per_year = 365.25 * 24 * seconds_per_hour
    offset = seconds_per_year * 31 + seconds_per_hour * 6
    return dtnum + offset
def date_time_conv(dtnum):
    """Format a Bear timestamp as the filename suffix ' - YYYY-MM-DD_HHMM'
    (local time)."""
    as_date = datetime.datetime.fromtimestamp(dt_conv(dtnum))
    # print(dt_conv(dtnum), as_date)
    return as_date.strftime(' - %Y-%m-%d_%H%M')
def time_stamp_ts(ts):
dtdate = datetime.datetime.fromtimestamp(ts)
return dtdate.strftime('%Y-%m-%d at %H:%M')
def date_conv(dtnum):
dtdate = datetime.datetime.fromtimestamp(dtnum)
return dtdate.strftime('%Y-%m-%d')
def delete_old_temp_files():
    """Recreate an empty temp export folder (guarded shutil.rmtree)."""
    # Deletes all files in temp folder before new export using "shutil.rmtree()":
    # NOTE! CAUTION! Do not change this function unless you really know shutil.rmtree() well!
    if os.path.exists(temp_path) and "BearExportTemp" in temp_path:
        # *** NOTE! Double checking that temp_path folder actually contains "BearExportTemp"
        # *** Because if temp_path is accidentally empty or root,
        # *** shutil.rmtree() will delete all files on your complete Hard Drive ;(
        shutil.rmtree(temp_path)
        # *** NOTE: USE rmtree() WITH EXTREME CAUTION!
    os.makedirs(temp_path)
def rsync_files_from_temp():
    """Mirror the freshly exported temp folder into every destination in
    multi_export via rsync (deleting removed notes when the flag says so)."""
    # Moves markdown files to new folder using rsync:
    # This is a very important step!
    # By first exporting all Bear notes to an emptied temp folder,
    # rsync will only update destination if modified or size have changed.
    # So only changed notes will be synced by Dropbox or OneDrive destinations.
    # Rsync will also delete notes on destination if deleted in Bear.
    # So doing it this way saves a lot of otherwise very complex programing.
    # Thank you very much, Rsync! ;)
    for (dest_path, delete) in multi_export:
        if not os.path.exists(dest_path):
            os.makedirs(dest_path)
        if delete:
            subprocess.call(['rsync', '-r', '-t', '--delete',
                             '--exclude', 'BearImages/',
                             '--exclude', '.Ulysses*',
                             '--exclude', '*.Ulysses_Public_Filter',
                             temp_path + "/", dest_path])
        else:
            subprocess.call(['rsync', '-r', '-t',
                             temp_path + "/", dest_path])
def sync_md_updates():
    """Import externally modified Markdown/textbundle files back into Bear.

    Returns True when at least one update was imported. Recurses once more
    after importing, in case new files synced in while processing.
    """
    updates_found = False
    if not os.path.exists(sync_ts_file) or not os.path.exists(export_ts_file):
        return False
    ts_last_sync = os.path.getmtime(sync_ts_file)
    ts_last_export = os.path.getmtime(export_ts_file)
    # Update synced timestamp file:
    update_sync_time_file(0)
    file_types = ('*.md', '*.txt', '*.markdown')
    for (root, dirnames, filenames) in os.walk(export_path):
        '''
        This step walks down into all sub folders, if any.
        '''
        for pattern in file_types:
            for filename in fnmatch.filter(filenames, pattern):
                md_file = os.path.join(root, filename)
                ts = os.path.getmtime(md_file)
                if ts > ts_last_sync:
                    if not updates_found:  # Yet
                        # Wait 5 sec at first for external files to finish downloading from dropbox.
                        # Otherwise images in textbundles might be missing in import:
                        time.sleep(5)
                    updates_found = True
                    md_text = read_file(md_file)
                    backup_ext_note(md_file)
                    if check_if_image_added(md_text, md_file):
                        textbundle_to_bear(md_text, md_file, ts)
                        write_log('Imported to Bear: ' + md_file)
                    else:
                        update_bear_note(md_text, md_file, ts, ts_last_export)
                        write_log('Bear Note Updated: ' + md_file)
    if updates_found:
        # Give Bear time to process updates:
        time.sleep(3)
        # Check again, just in case new updates synced from remote (OneDrive/Dropbox)
        # during this process!
        # The logic is not 100% fool proof, but should be close to 99.99%
        sync_md_updates()  # Recursive call
    return updates_found
def check_if_image_added(md_text, md_file):
    """Return True when a textbundle contains an externally added image.

    Bear-generated assets are prefixed with a flattened UUID path; any asset
    link whose name does not match that prefix was added outside Bear.
    Fix: the original body contained a stray no-op string literal (an example
    filename); it is now a comment. Also uses `not in` instead of `not ... in`.
    """
    if '.textbundle/' not in md_file:
        return False
    matches = re.findall(r'!\[.*?\]\(assets/(.+?_).+?\)', md_text)
    for image_match in matches:
        # Bear-generated asset names look like:
        # 'F89CDA3D-3FCC-4E92-88C1-CC4AF46FA733-10097-00002BBE9F7FF804_IMG_2280.JPG'
        if not re.match(r'[0-9A-F]{8}-([0-9A-F]{4}-){3}[0-9A-F]{12}-[0-9A-F]{3,5}-[0-9A-F]{16}_', image_match):
            return True
    return False
def textbundle_to_bear(md_text, md_file, mod_dt):
    """Re-import an externally modified textbundle into Bear by opening the
    bundle with the Bear app (preserves images); links back to the original
    note when a BearID is present."""
    md_text = restore_tags(md_text)
    bundle = os.path.split(md_file)[0]
    match = re.search(r'\{BearID:(.+?)\}', md_text)
    if match:
        uuid = match.group(1)
        # Remove old BearID: from new note
        md_text = re.sub(r'\<\!-- ?\{BearID\:' + uuid + r'\} ?--\>', '', md_text).rstrip() + '\n'
        md_text = insert_link_top_note(md_text, 'Images added! Link to original note: ', uuid)
    else:
        # New textbundle (with images), add path as tag:
        md_text = get_tag_from_path(md_text, bundle, export_path)
    write_file(md_file, md_text, mod_dt)
    os.utime(bundle, (-1, mod_dt))
    # Opening the bundle in Bear triggers the actual import.
    subprocess.call(['open', '-a', '/applications/bear.app', bundle])
    time.sleep(0.5)
def backup_ext_note(md_file):
    """Copy the external note (single file or whole textbundle) into
    sync_backup before its content is pushed into Bear."""
    if '.textbundle' in md_file:
        bundle_path = os.path.split(md_file)[0]
        bundle_name = os.path.split(bundle_path)[1]
        target = os.path.join(sync_backup, bundle_name)
        bundle_raw = os.path.splitext(target)[0]
        count = 2
        while os.path.exists(target):
            # Adding sequence number to identical filenames, preventing overwrite:
            target = bundle_raw + " - " + str(count).zfill(2) + ".textbundle"
            count += 1
        shutil.copytree(bundle_path, target)
    else:
        # Overwrite former backups of incoming changes, only keeps last one:
        shutil.copy2(md_file, sync_backup + '/')
def update_sync_time_file(ts):
    # Record when we last checked for external Markdown updates;
    # ts > 0 additionally sets the file's mtime (see write_file).
    write_file(sync_ts_file,
               "Checked for Markdown updates to sync at: " +
               datetime.datetime.now().strftime("%Y-%m-%d at %H:%M:%S"), ts)
def update_bear_note(md_text, md_file, ts, ts_last_export):
    """Push an externally edited Markdown file back into Bear.

    Existing notes (identified by the embedded BearID) are replaced via
    x-callback-url; conflicting concurrent edits create a new note instead.
    Files without a BearID become new notes tagged from their folder path.
    """
    md_text = restore_tags(md_text)
    md_text = restore_image_links(md_text)
    uuid = ''
    match = re.search(r'\{BearID:(.+?)\}', md_text)
    sync_conflict = False
    if match:
        uuid = match.group(1)
        # Remove old BearID: from new note
        md_text = re.sub(r'\<\!-- ?\{BearID\:' + uuid + r'\} ?--\>', '', md_text).rstrip() + '\n'
        sync_conflict = check_sync_conflict(uuid, ts_last_export)
        if sync_conflict:
            # Both Bear and the external copy changed: keep both by creating
            # a new note that links back to the original.
            link_original = 'bear://x-callback-url/open-note?id=' + uuid
            message = '::Sync conflict! External update: ' + time_stamp_ts(ts) + '::'
            message += '\n[Click here to see original Bear note](' + link_original + ')'
            x_create = 'bear://x-callback-url/create?show_window=no'
            bear_x_callback(x_create, md_text, message, '')
        else:
            # Regular external update
            orig_title = backup_bear_note(uuid)
            # message = '::External update: ' + time_stamp_ts(ts) + '::'
            x_replace = 'bear://x-callback-url/add-text?show_window=no&mode=replace_all&id=' + uuid
            bear_x_callback(x_replace, md_text, '', orig_title)
            # # Trash old original note:
            # x_trash = 'bear://x-callback-url/trash?show_window=no&id=' + uuid
            # subprocess.call(["open", x_trash])
            # time.sleep(.2)
    else:
        # New external md Note, since no Bear uuid found in text:
        # message = '::New external Note - ' + time_stamp_ts(ts) + '::'
        md_text = get_tag_from_path(md_text, md_file, export_path)
        x_create = 'bear://x-callback-url/create?show_window=no'
        bear_x_callback(x_create, md_text, '', '')
    return
def get_tag_from_path(md_text, md_file, root_path, inbox_for_root=True, extra_tag=''):
    """Append Bear tags derived from the file's sub-folder (plus macOS
    Finder tags and extra_tag) to md_text and return the result.

    extra_tag should be passed as '#tag' or '#space tag#'.
    """
    path = md_file.replace(root_path, '')[1:]
    sub_path = os.path.split(path)[0]
    tags = []
    if '.textbundle' in sub_path:
        # The tag comes from the folder containing the bundle, not the bundle.
        sub_path = os.path.split(sub_path)[0]
    if sub_path == '':
        # Root-level files go to the inbox (or get no folder tag at all).
        if inbox_for_root:
            tag = '#.inbox'
        else:
            tag = ''
    elif sub_path.startswith('_'):
        # '_name' folders map back to hidden '.name' tags (see sub_path_from_tag).
        tag = '#.' + sub_path[1:].strip()
    else:
        tag = '#' + sub_path.strip()
    if ' ' in tag:
        # Multi-word tags need a closing '#'.
        tag += "#"
    if tag != '':
        tags.append(tag)
    if extra_tag != '':
        tags.append(extra_tag)
    for tag in get_file_tags(md_file):
        tag = '#' + tag.strip()
        if ' ' in tag: tag += "#"
        tags.append(tag)
    return md_text.strip() + '\n\n' + ' '.join(tags) + '\n'
def get_file_tags(md_file):
    """Return macOS Finder tags for md_file via the gettag.sh helper,
    or [] on any failure (best effort by design).

    Fix: the original bare `except:` is narrowed to the failures this body
    can actually raise — OSError (helper missing/unreadable) and ValueError
    (json.JSONDecodeError is a subclass, for malformed helper output).
    """
    try:
        subprocess.call([gettag_sh, md_file, gettag_txt])
        # Strip the '\nNN' index artifacts plutil leaves in the output.
        text = re.sub(r'\\n\d{1,2}', r'', read_file(gettag_txt))
        return json.loads(text)
    except (OSError, ValueError):
        return []
def bear_x_callback(x_command, md_text, message, orig_title):
    """Send md_text to Bear through the given x-callback-url command;
    message (if non-empty) is inserted as the note's second line."""
    if message != '':
        lines = md_text.splitlines()
        lines.insert(1, message)
        md_text = '\n'.join(lines)
    ## 2018-02-23 at 22:41:
    ## Using new `/add-text` mode: `replace_all` including changes to title.
    # if orig_title != '':
    #     lines = md_text.splitlines()
    #     title = re.sub(r'^#+ ', r'', lines[0])
    #     if title != orig_title:
    #         md_text = '\n'.join(lines)
    #     else:
    #         md_text = '\n'.join(lines[1:])
    x_command_text = x_command + '&text=' + urllib.parse.quote(md_text)
    subprocess.call(["open", x_command_text])
    time.sleep(.2)
def check_sync_conflict(uuid, ts_last_export):
    """Return True if the Bear note `uuid` was modified inside Bear after
    the last export (i.e. both copies changed — a sync conflict).

    Fix: the UUID is now bound as a query parameter instead of being
    concatenated into the SQL string (SQL-injection safe, per sqlite3 docs).
    """
    conflict = False
    # Check modified date of original note in Bear sqlite db!
    with sqlite3.connect(bear_db) as conn:
        conn.row_factory = sqlite3.Row
        query = "SELECT * FROM `ZSFNOTE` WHERE `ZTRASHED` LIKE '0' AND `ZUNIQUEIDENTIFIER` LIKE ?"
        c = conn.execute(query, (uuid,))
        for row in c:
            modified = row['ZMODIFICATIONDATE']
            uuid = row['ZUNIQUEIDENTIFIER']
            mod_dt = dt_conv(modified)
            conflict = mod_dt > ts_last_export
    return conflict
def backup_bear_note(uuid):
    """Back up the Bear note `uuid` as .txt into sync_backup before it is
    overwritten by an external update; returns the note's original title
    ('' when the UUID is unknown).

    Fix: the UUID is now bound as a query parameter instead of being
    concatenated into the SQL string (SQL-injection safe, per sqlite3 docs).
    """
    # Get single note from Bear sqlite db!
    with sqlite3.connect(bear_db) as conn:
        conn.row_factory = sqlite3.Row
        query = "SELECT * FROM `ZSFNOTE` WHERE `ZUNIQUEIDENTIFIER` LIKE ?"
        c = conn.execute(query, (uuid,))
        title = ''
        for row in c:  # Will only get one row if uuid is found!
            title = row['ZTITLE']
            md_text = row['ZTEXT'].rstrip()
            modified = row['ZMODIFICATIONDATE']
            mod_dt = dt_conv(modified)
            created = row['ZCREATIONDATE']
            cre_dt = dt_conv(created)
            md_text = insert_link_top_note(md_text, 'Link to updated note: ', uuid)
            dtdate = datetime.datetime.fromtimestamp(cre_dt)
            filename = clean_title(title) + dtdate.strftime(' - %Y-%m-%d_%H%M')
            if not os.path.exists(sync_backup):
                os.makedirs(sync_backup)
            file_part = os.path.join(sync_backup, filename)
            # This is a Bear text file, not exactly markdown.
            backup_file = file_part + ".txt"
            count = 2
            while os.path.exists(backup_file):
                # Adding sequence number to identical filenames, preventing overwrite:
                backup_file = file_part + " - " + str(count).zfill(2) + ".txt"
                count += 1
            write_file(backup_file, md_text, mod_dt)
            filename2 = os.path.split(backup_file)[1]
            write_log('Original to sync_backup: ' + filename2)
    return title
def insert_link_top_note(md_text, message, uuid):
    """Insert a '::message [title](bear://...)::' banner as the second line
    of md_text, linking back to the Bear note `uuid`."""
    lines = md_text.split('\n')
    title = re.sub(r'^#{1,6} ', r'', lines[0])
    banner = ('::' + message + '[' + title
              + '](bear://x-callback-url/open-note?id=' + uuid + ')::')
    return '\n'.join(lines[:1] + [banner] + lines[1:])
def init_gettag_script():
    """Write the gettag.sh helper (dumps macOS Finder tags as JSON via
    xattr/plutil) to ~/temp and make it executable."""
    gettag_script = \
'''#!/bin/bash
if [[ ! -e $1 ]] ; then
echo 'file missing or not specified'
exit 0
fi
JSON="$(xattr -p com.apple.metadata:_kMDItemUserTags "$1" | xxd -r -p | plutil -convert json - -o -)"
echo $JSON > "$2"
'''
    temp = os.path.join(HOME, 'temp')
    if not os.path.exists(temp):
        os.makedirs(temp)
    write_file(gettag_sh, gettag_script, 0)
    subprocess.call(['chmod', '777', gettag_sh])
def notify(message):
    """Show a macOS notification via terminal-notifier (best effort).

    Fix: the original bare `except:` is narrowed to OSError, which is what
    subprocess.call raises when the binary is missing or not executable.
    """
    title = "ul_sync_md.py"
    try:
        # Uses "terminal-notifier", download at:
        # https://github.com/julienXX/terminal-notifier/releases/download/2.0.0/terminal-notifier-2.0.0.zip
        # Only works with MacOS 10.11+
        subprocess.call(['/Applications/terminal-notifier.app/Contents/MacOS/terminal-notifier',
                         '-message', message, "-title", title, '-sound', 'default'])
    except OSError:
        write_log('"terminal-notifier.app" is missing!')
    return
# Script entry point.
if __name__ == '__main__':
    main()
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
app/api/db_config.py | import psycopg2
import psycopg2.extras
import os
url = os.getenv('DATABASE_URL')
def connection(url):
    """Open and return a new psycopg2 connection for the given DSN/URL."""
    return psycopg2.connect(url)
def init_db():
    """Return a fresh connection to the configured DATABASE_URL."""
    return connection(url)
def create_tables():
    """Create the users and incidents tables (idempotent) and commit."""
    conn = connection(url)
    cursor = conn.cursor()
    for statement in tables():
        cursor.execute(statement)
    conn.commit()
def tables():
    """Return the CREATE TABLE statements (executed in order by
    create_tables) for the users and incidents tables."""
    users_table = '''CREATE TABLE IF NOT EXISTS users(
        id serial PRIMARY KEY,
        firstname char(20) NOT NULL,
        lastname char(20) NOT NULL,
        email char(50) NOT NULL,
        username char(20) NOT NULL,
        phone char(14) NOT NULL,
        isAdmin BOOLEAN DEFAULT False,
        password char(100) NOT NULL,
        registered DATE NOT NULL DEFAULT CURRENT_DATE)
        '''
    incidents_table = '''CREATE TABLE IF NOT EXISTS incidents(
        id serial PRIMARY KEY,
        title char(100) NOT NULL,
        incident char(50) NOT NULL,
        location char(100) NOT NULL,
        status char(30) DEFAULT 'Draft',
        description char(200) NOT NULL,
        images char(100) NOT NULL,
        createdBy char(100) NOT NULL,
        createdOn DATE NOT NULL DEFAULT CURRENT_DATE) '''
    queries = [users_table, incidents_table]
    return queries
def destroy_tables():
    """Drop the users and incidents tables (CASCADE) if they exist, then commit."""
    conn = connection(url)
    cursor = conn.cursor()
    drop_statements = (
        ''' DROP TABLE IF EXISTS users CASCADE''',
        ''' DROP TABLE IF EXISTS incidents CASCADE''',
    )
    for statement in drop_statements:
        cursor.execute(statement)
    conn.commit()
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
mint/run/core/aws-sdk-go/main.go | /*
*
* Mint, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"bytes"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"math/rand"
"net/http"
"os"
"reflect"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
log "github.com/sirupsen/logrus"
)
// letterBytes is the alphabet randString draws from when generating bucket
// and object names.
// NOTE(review): "01234569" skips 7 and 8 — possibly a typo for "0123456789".
// Harmless for random test names, but worth confirming upstream.
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// different kinds of test failures
const (
	PASS = "PASS" // Indicate that a test passed
	FAIL = "FAIL" // Indicate that a test failed
)
// errorResponse mirrors the XML error payload an S3-compatible server
// returns; the struct tags map XML element names and suppress JSON output
// for non-payload fields.
type errorResponse struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string
	Message    string
	BucketName string
	Key        string
	RequestID  string `xml:"RequestId"`
	HostID     string `xml:"HostId"`
	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string
	// Headers of the returned S3 XML error
	Headers http.Header `xml:"-" json:"-"`
}
// mintJSONFormatter emits each log entry as a single JSON object per line,
// in the structured format the mint test framework expects.
type mintJSONFormatter struct {
}

// Format renders the entry's fields as JSON. Error values are stringified
// explicitly because encoding/json otherwise ignores them
// (https://github.com/sirupsen/logrus/issues/137).
func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
	data := make(log.Fields, len(entry.Data))
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	serialized, err := json.Marshal(data)
	if err != nil {
		// Lowercase, colon-separated wrap per Go error-string convention
		// (was "Failed to marshal fields to JSON, %w").
		return nil, fmt.Errorf("marshal fields to JSON: %w", err)
	}
	return append(serialized, '\n'), nil
}
// successLogger returns a log entry pre-populated with the mint fields for
// a passing test; the duration is reported in milliseconds.
func successLogger(function string, args map[string]interface{}, startTime time.Time) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	// log with the fields as per mint
	fields := log.Fields{"name": "aws-sdk-go", "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": PASS}
	return log.WithFields(fields)
}
// failureLog returns a log entry pre-populated with the mint fields for a
// failing test; the error field is attached only when err is non-nil.
func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
	// calculate the test case duration
	duration := time.Since(startTime)
	var fields log.Fields
	// log with the fields as per mint
	if err != nil {
		fields = log.Fields{"name": "aws-sdk-go", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message, "error": err}
	} else {
		fields = log.Fields{"name": "aws-sdk-go", "function": function, "args": args,
			"duration": duration.Nanoseconds() / 1000000, "status": FAIL, "alert": alert, "message": message}
	}
	return log.WithFields(fields)
}
// randString returns a pseudo-random name built from prefix plus letters
// drawn from letterBytes, using src 6 bits at a time (up to 10 letters per
// Int63 call). Note: n only sizes the scratch buffer — the result is always
// truncated to 30 characters total, prefix included.
func randString(n int, src rand.Source, prefix string) string {
	buf := make([]byte, n)
	pos := n - 1
	cache, remain := src.Int63(), letterIdxMax
	for pos >= 0 {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			buf[pos] = letterBytes[idx]
			pos--
		}
		cache >>= letterIdxBits
		remain--
	}
	return prefix + string(buf[:30-len(prefix)])
}
// isObjectTaggingImplemented probes the target server for object-tagging
// support. It creates a throwaway bucket and object, calls GetObjectTagging,
// and returns false only when the server answers with a "NotImplemented"
// AWS error code. Setup failures terminate the whole process via Fatal.
func isObjectTaggingImplemented(s3Client *s3.S3) bool {
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	startTime := time.Now()
	function := "isObjectTaggingImplemented"
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
	}
	// Remove the probe object and bucket no matter how the probe ends.
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		// Fatal exits the process, so the return below never runs; it is
		// kept for readability.
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return false
	}
	_, err = s3Client.PutObject(&s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("testfile")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to success but got %v", err), err).Fatal()
		return false
	}
	_, err = s3Client.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			// Only an explicit NotImplemented marks tagging as unsupported;
			// any other error is treated as "supported" here.
			if awsErr.Code() == "NotImplemented" {
				return false
			}
		}
	}
	return true
}
// cleanup removes the given object from bucket and, when deleteBucket is
// set, the bucket itself. Object deletion errors are deliberately ignored;
// a failed bucket deletion aborts the run via Fatal.
func cleanup(s3Client *s3.S3, bucket string, object string, function string,
	args map[string]interface{}, startTime time.Time, deleteBucket bool) {
	// Best-effort object removal: the object may never have been created,
	// so errors are intentionally not checked.
	s3Client.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if !deleteBucket {
		return
	}
	if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{
		Bucket: aws.String(bucket),
	}); err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go DeleteBucket Failed", err).Fatal()
	}
}
// testPresignedPutInvalidHash verifies that a presigned PUT carrying a bogus
// X-Amz-Content-Sha256 header is rejected by the server with an
// XAmzContentSHA256Mismatch XML error response.
func testPresignedPutInvalidHash(s3Client *s3.S3) {
	startTime := time.Now()
	function := "PresignedPut"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := "presignedTest"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
		"expiry":     expiry,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)
	req, _ := s3Client.PutObjectRequest(&s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(object),
		ContentType: aws.String("application/octet-stream"),
	})
	// Sign the request with a deliberately invalid content hash so the
	// presigned URL embeds the bad value.
	req.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "invalid-sha256")
	url, err := req.Presign(expiry)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go presigned Put request creation failed", err).Fatal()
		return
	}
	rreq, err := http.NewRequest(http.MethodPut, url, bytes.NewReader([]byte("")))
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go presigned PUT request failed", err).Fatal()
		return
	}
	// Send the same invalid hash on the actual upload request.
	rreq.Header.Set("X-Amz-Content-Sha256", "invalid-sha256")
	rreq.Header.Set("Content-Type", "application/octet-stream")
	resp, err := http.DefaultClient.Do(rreq)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go presigned put request failed", err).Fatal()
		return
	}
	defer resp.Body.Close()
	// The server is expected to reply with an XML error document.
	dec := xml.NewDecoder(resp.Body)
	errResp := errorResponse{}
	err = dec.Decode(&errResp)
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go unmarshalling xml failed", err).Fatal()
		return
	}
	if errResp.Code != "XAmzContentSHA256Mismatch" {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go presigned PUT expected to fail with XAmzContentSHA256Mismatch but got %v", errResp.Code), errors.New("AWS S3 error code mismatch")).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testListObjects verifies ListObjectsV2 key counts: a freshly created
// bucket must report zero keys, and after uploading two objects the same
// listing must report exactly two.
func testListObjects(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testListObjects"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object1 := "testObject1"
	object2 := "testObject2"
	expiry := 1 * time.Minute
	args := map[string]interface{}{
		"bucketName":  bucket,
		"objectName1": object1,
		"objectName2": object2,
		"expiry":      expiry,
	}
	// getKeys flattens a listing into plain key names for error messages.
	getKeys := func(objects []*s3.Object) []string {
		var rv []string
		for _, obj := range objects {
			rv = append(rv, *obj.Key)
		}
		return rv
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object1, function, args, startTime, true)
	defer cleanup(s3Client, bucket, object2, function, args, startTime, false)
	listInput := &s3.ListObjectsV2Input{
		Bucket:  aws.String(bucket),
		MaxKeys: aws.Int64(1000),
		Prefix:  aws.String(""),
	}
	// An empty bucket must list zero keys.
	result, err := s3Client.ListObjectsV2(listInput)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects expected to success but got %v", err), err).Fatal()
		return
	}
	if *result.KeyCount != 0 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects with prefix '' expected 0 key but got %v, %v", result.KeyCount, getKeys(result.Contents)), errors.New("AWS S3 key count mismatch")).Fatal()
		return
	}
	putInput1 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("filetoupload")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object1),
	}
	_, err = s3Client.PutObject(putInput1)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to success but got %v", err), err).Fatal()
		return
	}
	putInput2 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("filetoupload")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object2),
	}
	_, err = s3Client.PutObject(putInput2)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to success but got %v", err), err).Fatal()
		return
	}
	// The same listing must now see exactly the two uploaded objects.
	result, err = s3Client.ListObjectsV2(listInput)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects expected to success but got %v", err), err).Fatal()
		return
	}
	if *result.KeyCount != 2 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go listobjects with prefix '' expected 2 key but got %v, %v", *result.KeyCount, getKeys(result.Contents)), errors.New("AWS S3 key count mismatch")).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testSelectObject exercises S3 Select (SelectObjectContent) on CSV input,
// projecting the first column of every record. The same SQL query runs
// twice: once against CSV with a comma field delimiter and once against CSV
// with a multi-byte unicode delimiter.
func testSelectObject(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testSelectObject"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object1 := "object1.csv"
	object2 := "object2.csv"
	args := map[string]interface{}{
		"bucketName":  bucket,
		"objectName1": object1,
		"objectName2": object2,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	// Test comma field separator
	inputCsv1 := `year,gender,ethnicity,firstname,count,rank
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,SOPHIA,119,1
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,CHLOE,106,2
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,EMILY,93,3
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,OLIVIA,89,4
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,EMMA,75,5
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,ISABELLA,67,6
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,TIFFANY,54,7
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,ASHLEY,52,8
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,FIONA,48,9
2011,FEMALE,ASIAN AND PACIFIC ISLANDER,ANGELA,47,10
`
	// Expected projection: the first column of each data row (the header is
	// ignored via FileHeaderInfoIgnore below).
	outputCSV1 := `2011
2011
2011
2011
2011
2011
2011
2011
2011
2011
`
	putInput1 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader(inputCsv1)),
		Bucket: aws.String(bucket),
		Key:    aws.String(object1),
	}
	_, err = s3Client.PutObject(putInput1)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object failed %v", err), err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object1, function, args, startTime, true)
	params := &s3.SelectObjectContentInput{
		Bucket:          &bucket,
		Key:             &object1,
		ExpressionType:  aws.String(s3.ExpressionTypeSql),
		Expression:      aws.String("SELECT s._1 FROM S3Object s"),
		RequestProgress: &s3.RequestProgress{},
		InputSerialization: &s3.InputSerialization{
			CompressionType: aws.String("NONE"),
			CSV: &s3.CSVInput{
				FileHeaderInfo:  aws.String(s3.FileHeaderInfoIgnore),
				FieldDelimiter:  aws.String(","),
				RecordDelimiter: aws.String("\n"),
			},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	}
	resp, err := s3Client.SelectObjectContent(params)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object failed %v", err), err).Fatal()
		return
	}
	defer resp.EventStream.Close()
	payload := ""
	// Drain the event stream; only RecordsEvent frames carry query output.
	for event := range resp.EventStream.Events() {
		switch v := event.(type) {
		case *s3.RecordsEvent:
			// s3.RecordsEvent.Records is a byte slice of select records
			payload = string(v.Payload)
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object failed %v", err), err).Fatal()
		return
	}
	if payload != outputCSV1 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object output mismatch %v", payload), errors.New("AWS S3 select object mismatch")).Fatal()
		return
	}
	// Test unicode field separator
	inputCsv2 := `"year"╦"gender"╦"ethnicity"╦"firstname"╦"count"╦"rank"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"SOPHIA"╦"119"╦"1"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"CHLOE"╦"106"╦"2"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMILY"╦"93"╦"3"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"OLIVIA"╦"89"╦"4"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"EMMA"╦"75"╦"5"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ISABELLA"╦"67"╦"6"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"TIFFANY"╦"54"╦"7"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ASHLEY"╦"52"╦"8"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"FIONA"╦"48"╦"9"
"2011"╦"FEMALE"╦"ASIAN AND PACIFIC ISLANDER"╦"ANGELA"╦"47"╦"10"
`
	outputCSV2 := `2011
2011
2011
2011
2011
2011
2011
2011
2011
2011
`
	putInput2 := &s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader(inputCsv2)),
		Bucket: aws.String(bucket),
		Key:    aws.String(object2),
	}
	_, err = s3Client.PutObject(putInput2)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object upload failed: %v", err), err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object2, function, args, startTime, false)
	params2 := &s3.SelectObjectContentInput{
		Bucket:          &bucket,
		Key:             &object2,
		ExpressionType:  aws.String(s3.ExpressionTypeSql),
		Expression:      aws.String("SELECT s._1 FROM S3Object s"),
		RequestProgress: &s3.RequestProgress{},
		InputSerialization: &s3.InputSerialization{
			CompressionType: aws.String("NONE"),
			CSV: &s3.CSVInput{
				FileHeaderInfo:  aws.String(s3.FileHeaderInfoIgnore),
				FieldDelimiter:  aws.String("╦"),
				RecordDelimiter: aws.String("\n"),
			},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	}
	resp, err = s3Client.SelectObjectContent(params2)
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object failed for unicode separator %v", err), err).Fatal()
		return
	}
	defer resp.EventStream.Close()
	for event := range resp.EventStream.Events() {
		switch v := event.(type) {
		case *s3.RecordsEvent:
			// s3.RecordsEvent.Records is a byte slice of select records
			payload = string(v.Payload)
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object failed for unicode separator %v", err), err).Fatal()
		return
	}
	if payload != outputCSV2 {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go Select object output mismatch %v", payload), errors.New("AWS S3 select object mismatch")).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testObjectTagging verifies that tags supplied at PutObject time are
// returned by GetObjectTagging, and that PutObjectTagging replaces the
// object's tag set.
func testObjectTagging(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testObjectTagging"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)
	// Tag supplied as a query-string on upload; tagInputSet is the same tag
	// in the shape GetObjectTagging is expected to return.
	taginput := "Tag1=Value1"
	tagInputSet := []*s3.Tag{
		{
			Key:   aws.String("Tag1"),
			Value: aws.String("Value1"),
		},
	}
	_, err = s3Client.PutObject(&s3.PutObjectInput{
		Body:    aws.ReadSeekCloser(strings.NewReader("testfile")),
		Bucket:  aws.String(bucket),
		Key:     aws.String(object),
		Tagging: &taginput,
	})
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to success but got %v", err), err).Fatal()
		return
	}
	tagop, err := s3Client.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		// Bug fix: previously only awserr.Error values were reported; any
		// other error fell through and dereferenced a possibly-nil tagop.
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUTObjectTagging expected to success but got %v", err), err).Fatal()
		return
	}
	if !reflect.DeepEqual(tagop.TagSet, tagInputSet) {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUTObject Tag input did not match with GetObjectTagging output %v", nil), nil).Fatal()
		return
	}
	// Replace the tag set via the dedicated PutObjectTagging API.
	taginputSet1 := []*s3.Tag{
		{
			Key:   aws.String("Key4"),
			Value: aws.String("Value4"),
		},
	}
	_, err = s3Client.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
		Tagging: &s3.Tagging{
			TagSet: taginputSet1,
		},
	})
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUTObjectTagging expected to success but got %v", err), err).Fatal()
		return
	}
	tagop, err = s3Client.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUTObjectTagging expected to success but got %v", err), err).Fatal()
		return
	}
	if !reflect.DeepEqual(tagop.TagSet, taginputSet1) {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUTObjectTagging input did not match with GetObjectTagging output %v", nil), nil).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testObjectTaggingErrors verifies that PutObjectTagging rejects invalid
// tag sets: more than 10 tags, duplicate tag keys, an over-long tag key,
// and an over-long tag value.
func testObjectTaggingErrors(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testObjectTaggingErrors"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)
	_, err = s3Client.PutObject(&s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("testfile")),
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to success but got %v", err), err).Fatal()
		return
	}
	// case 1 : Too many tags > 10
	input := &s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key2"),
					Value: aws.String("Value4"),
				},
				{
					Key:   aws.String("Key3"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key4"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key5"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key6"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key7"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key8"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key9"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key10"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key11"),
					Value: aws.String("Value3"),
				},
			},
		},
	}
	_, err = s3Client.PutObjectTagging(input)
	if err == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go PUT expected to fail but succeeded", err).Fatal()
		return
	}
	// NOTE(review): with &&, this check only trips when BOTH the code and
	// the message differ from the expectation — confirm this was intended
	// rather than || (same pattern in cases 2-4 below).
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() != "BadRequest" && aerr.Message() != "BadRequest: Object tags cannot be greater than 10" {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to fail but got %v", err), err).Fatal()
			return
		}
	}
	// case 2 : Duplicate Tag Keys
	input = &s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Value4"),
				},
			},
		},
	}
	_, err = s3Client.PutObjectTagging(input)
	if err == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go PUT expected to fail but succeeded", err).Fatal()
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() != "InvalidTag" && aerr.Message() != "InvalidTag: Cannot provide multiple Tags with the same key" {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to fail but got %v", err), err).Fatal()
			return
		}
	}
	// case 3 : Too long Tag Key
	input = &s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{
					Key:   aws.String("Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1"),
					Value: aws.String("Value3"),
				},
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Value4"),
				},
			},
		},
	}
	_, err = s3Client.PutObjectTagging(input)
	if err == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go PUT expected to fail but succeeded", err).Fatal()
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() != "InvalidTag" && aerr.Message() != "InvalidTag: The TagKey you have provided is invalid" {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to fail but got %v", err), err).Fatal()
			return
		}
	}
	// case 4 : Too long Tag value
	input = &s3.PutObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1Key1"),
				},
				{
					Key:   aws.String("Key1"),
					Value: aws.String("Value4"),
				},
			},
		},
	}
	_, err = s3Client.PutObjectTagging(input)
	if err == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go PUT expected to fail but succeeded", err).Fatal()
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() != "InvalidTag" && aerr.Message() != "InvalidTag: The TagValue you have provided is invalid" {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to fail but got %v", err), err).Fatal()
			return
		}
	}
	successLogger(function, args, startTime).Info()
}
// testCreateBucketError verifies that re-creating an existing bucket fails
// with BucketAlreadyExists or BucketAlreadyOwnedByYou. The client region is
// temporarily switched to us-west-1 because Amazon S3 reports the conflict
// in every region except the North Virginia one; see
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#S3.CreateBucket
func testCreateBucketError(s3Client *s3.S3) {
	region := s3Client.Config.Region
	s3Client.Config.Region = aws.String("us-west-1")
	// Bug fix: restore the original region on every exit path, not only on
	// the success path as before.
	defer func() { s3Client.Config.Region = region }()
	// initialize logging params
	startTime := time.Now()
	function := "testMakeBucketError"
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	args := map[string]interface{}{
		"bucketName": bucketName,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucketName, "", function, args, startTime, true)
	_, errCreating := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	if errCreating == nil {
		// Bug fix: log errCreating rather than the stale (nil) err.
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Should Return Error for Existing bucket", errCreating).Fatal()
		return
	}
	// Verify valid error response from server.
	if errCreating.(s3.RequestFailure).Code() != "BucketAlreadyExists" &&
		errCreating.(s3.RequestFailure).Code() != "BucketAlreadyOwnedByYou" {
		failureLog(function, args, startTime, "", "Invalid error returned by server", errCreating).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testListMultipartUploads uploads five parts to a multipart upload,
// verifies that ListParts reports every part with the ETag recorded at
// upload time, and finally aborts the upload.
func testListMultipartUploads(s3Client *s3.S3) {
	startTime := time.Now()
	function := "testListMultipartUploads"
	bucket := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args := map[string]interface{}{
		"bucketName": bucket,
		"objectName": object,
	}
	_, errCreating := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})
	if errCreating != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", errCreating).Fatal()
		return
	}
	defer cleanup(s3Client, bucket, object, function, args, startTime, true)
	multipartUpload, err := s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(object),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go createMultipartupload API failed", err).Fatal()
		return
	}
	// Bug fix: key by part-number VALUE. The previous map[*int64]*string was
	// keyed by freshly allocated pointers, which could never equal the
	// pointers returned in ListParts, so the ETag check below was silently
	// skipped (and ETags were compared by pointer, not value).
	parts := make(map[int64]string)
	for i := 0; i < 5; i++ {
		result, errUpload := s3Client.UploadPart(&s3.UploadPartInput{
			Bucket:     aws.String(bucket),
			Key:        aws.String(object),
			UploadId:   multipartUpload.UploadId,
			PartNumber: aws.Int64(int64(i + 1)),
			Body:       aws.ReadSeekCloser(strings.NewReader("fileToUpload")),
		})
		if errUpload != nil {
			// Best-effort abort so the server does not accumulate stale uploads.
			_, _ = s3Client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
				Bucket:   aws.String(bucket),
				Key:      aws.String(object),
				UploadId: multipartUpload.UploadId,
			})
			failureLog(function, args, startTime, "", "AWS SDK Go uploadPart API failed for", errUpload).Fatal()
			return
		}
		parts[int64(i+1)] = aws.StringValue(result.ETag)
	}
	listParts, errParts := s3Client.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(object),
		UploadId: multipartUpload.UploadId,
	})
	if errParts != nil {
		// Bug fix: report errParts, not the stale err from CreateMultipartUpload.
		failureLog(function, args, startTime, "", "AWS SDK Go ListPartsInput API failed for", errParts).Fatal()
		return
	}
	if len(parts) != len(listParts.Parts) {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go ListParts.Parts len mismatch want: %v got: %v", len(parts), len(listParts.Parts)), nil).Fatal()
		return
	}
	for _, part := range listParts.Parts {
		partNumber := aws.Int64Value(part.PartNumber)
		tag, ok := parts[partNumber]
		if !ok {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go ListParts returned unexpected part number %v", partNumber), nil).Fatal()
			return
		}
		if tag != aws.StringValue(part.ETag) {
			failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go ListParts.Parts output mismatch want: %v got: %v", tag, aws.StringValue(part.ETag)), nil).Fatal()
			return
		}
	}
	_, err = s3Client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(object),
		UploadId: multipartUpload.UploadId,
	})
	if err != nil {
		// Plain string instead of a constant fmt.Sprintf (go vet's printf check).
		failureLog(function, args, startTime, "", "AWS SDK Go AbortMultipartUpload failed", err).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// testSSECopyObject verifies SSE-C (customer-key server-side encryption)
// copy error handling: copying an SSE-C encrypted object without the
// copy-source key parameters must fail, and supplying copy-source SSE-C
// parameters for a non-encrypted source must also fail.
func testSSECopyObject(s3Client *s3.S3) {
	// initialize logging params
	startTime := time.Now()
	function := "testSSECopyObjectSourceEncrypted"
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "aws-sdk-go-test-")
	object := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args := map[string]interface{}{
		"bucketName": bucketName,
		"objectName": object,
	}
	_, err := s3Client.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String(bucketName),
	})
	if err != nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CreateBucket Failed", err).Fatal()
		return
	}
	defer cleanup(s3Client, bucketName, object+"-enc", function, args, startTime, true)
	defer cleanup(s3Client, bucketName, object+"-noenc", function, args, startTime, false)
	var wrongSuccess = errors.New("Succeeded instead of failing. ")
	// create encrypted object
	sseCustomerKey := aws.String("32byteslongsecretkeymustbegiven2")
	_, errPutEnc := s3Client.PutObject(&s3.PutObjectInput{
		Body:                 aws.ReadSeekCloser(strings.NewReader("fileToUpload")),
		Bucket:               aws.String(bucketName),
		Key:                  aws.String(object + "-enc"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       sseCustomerKey,
	})
	if errPutEnc != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to succeed but got %v", errPutEnc), errPutEnc).Fatal()
		return
	}
	// copy the encrypted object; no CopySourceSSECustomer* parameters are
	// given, so the server must reject the request.
	_, errCopyEnc := s3Client.CopyObject(&s3.CopyObjectInput{
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       sseCustomerKey,
		CopySource:           aws.String(bucketName + "/" + object + "-enc"),
		Bucket:               aws.String(bucketName),
		Key:                  aws.String(object + "-copy"),
	})
	if errCopyEnc == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CopyObject expected to fail, but it succeeds ", wrongSuccess).Fatal()
		return
	}
	var invalidSSECustomerAlgorithm = "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm"
	if !strings.Contains(errCopyEnc.Error(), invalidSSECustomerAlgorithm) {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go CopyObject expected error %v got %v", invalidSSECustomerAlgorithm, errCopyEnc), errCopyEnc).Fatal()
		return
	}
	// create non-encrypted object
	_, errPut := s3Client.PutObject(&s3.PutObjectInput{
		Body:   aws.ReadSeekCloser(strings.NewReader("fileToUpload")),
		Bucket: aws.String(bucketName),
		Key:    aws.String(object + "-noenc"),
	})
	if errPut != nil {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go PUT expected to succeed but got %v", errPut), errPut).Fatal()
		return
	}
	// copy the non-encrypted object; copy-source SSE-C parameters on an
	// unencrypted source must be rejected.
	_, errCopy := s3Client.CopyObject(&s3.CopyObjectInput{
		CopySourceSSECustomerAlgorithm: aws.String("AES256"),
		CopySourceSSECustomerKey:       sseCustomerKey,
		SSECustomerAlgorithm:           aws.String("AES256"),
		SSECustomerKey:                 sseCustomerKey,
		CopySource:                     aws.String(bucketName + "/" + object + "-noenc"),
		Bucket:                         aws.String(bucketName),
		Key:                            aws.String(object + "-copy"),
	})
	if errCopy == nil {
		failureLog(function, args, startTime, "", "AWS SDK Go CopyObject expected to fail, but it succeeds ", wrongSuccess).Fatal()
		return
	}
	var invalidEncryptionParameters = "The encryption parameters are not applicable to this object."
	if !strings.Contains(errCopy.Error(), invalidEncryptionParameters) {
		failureLog(function, args, startTime, "", fmt.Sprintf("AWS SDK Go CopyObject expected error %v got %v", invalidEncryptionParameters, errCopy), errCopy).Fatal()
		return
	}
	successLogger(function, args, startTime).Info()
}
// main wires up an S3 client from environment configuration and runs the
// aws-sdk-go mint test suite against it.
//
// Environment variables:
//
//	SERVER_ENDPOINT - host:port of the S3-compatible server
//	ACCESS_KEY      - access key credential
//	SECRET_KEY      - secret key credential
//	ENABLE_HTTPS    - "1" selects https:// and enables the SSE-C tests
func main() {
	endpoint := os.Getenv("SERVER_ENDPOINT")
	accessKey := os.Getenv("ACCESS_KEY")
	secretKey := os.Getenv("SECRET_KEY")
	secure := os.Getenv("ENABLE_HTTPS")
	sdkEndpoint := "http://" + endpoint
	if secure == "1" {
		sdkEndpoint = "https://" + endpoint
	}
	creds := credentials.NewStaticCredentials(accessKey, secretKey, "")
	// session.New is deprecated; session.NewSession surfaces setup errors
	// and session.Must panics on them instead of deferring the failure.
	newSession := session.Must(session.NewSession())
	s3Config := &aws.Config{
		Credentials:      creds,
		Endpoint:         aws.String(sdkEndpoint),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}
	// Create an S3 service object in the default region.
	s3Client := s3.New(newSession, s3Config)
	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)
	// execute tests
	testPresignedPutInvalidHash(s3Client)
	testListObjects(s3Client)
	testSelectObject(s3Client)
	testCreateBucketError(s3Client)
	testListMultipartUploads(s3Client)
	if secure == "1" {
		testSSECopyObject(s3Client)
	}
	// Tagging tests only run against servers that implement object tagging.
	if isObjectTaggingImplemented(s3Client) {
		testObjectTagging(s3Client)
		testObjectTaggingErrors(s3Client)
	}
}
| [
"\"SERVER_ENDPOINT\"",
"\"ACCESS_KEY\"",
"\"SECRET_KEY\"",
"\"ENABLE_HTTPS\""
]
| []
| [
"ACCESS_KEY",
"SECRET_KEY",
"ENABLE_HTTPS",
"SERVER_ENDPOINT"
]
| [] | ["ACCESS_KEY", "SECRET_KEY", "ENABLE_HTTPS", "SERVER_ENDPOINT"] | go | 4 | 0 | |
Tests/test_Clustalw_tool.py | # Copyright 2008-2011 by Peter Cock. All rights reserved.
# Revisions copyright 2012 by Christian Brueffer. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# TODO - Clean up the extra files created by clustalw? e.g. *.dnd
# and *.aln where we have not requested an explicit name?
"""Tests for Clustalw tool."""
from Bio import MissingExternalDependencyError
import sys
import os
import unittest
from Bio import SeqIO
from Bio import AlignIO
from Bio.Align.Applications import ClustalwCommandline
from Bio.Application import ApplicationError
#################################################################
# Locate a clustalw executable, or raise MissingExternalDependencyError
# so the whole test module is skipped.
# Try to avoid problems when the OS is in another language
os.environ["LANG"] = "C"

clustalw_exe = None
if sys.platform == "win32":
    # TODO - Check the path?
    try:
        # This can vary depending on the Windows language.
        prog_files = os.environ["PROGRAMFILES"]
    except KeyError:
        prog_files = r"C:\Program Files"

    # Note that EBI's clustalw2 installer, e.g. clustalw-2.0.10-win.msi
    # uses C:\Program Files\ClustalW2\clustalw2.exe so we should check
    # for that.
    #
    # Some users doing a manual install have reported using
    # C:\Program Files\clustalw.exe
    #
    # Older installers might use something like this,
    # C:\Program Files\Clustalw\clustalw.exe
    #
    # One particular case is www.tc.cornell.edu currently provide a
    # clustalw1.83 installer which uses the following long location:
    # C:\Program Files\CTCBioApps\clustalw\v1.83\clustalw1.83.exe
    likely_dirs = [
        "ClustalW2",
        "",
        "Clustal",
        "Clustalw",
        "Clustalw183",
        "Clustalw1.83",
        r"CTCBioApps\clustalw\v1.83",
    ]
    likely_exes = ["clustalw2.exe", "clustalw.exe", "clustalw1.83.exe"]
    # Take the first existing dir/exe combination, preferring clustalw2.
    for folder in likely_dirs:
        if os.path.isdir(os.path.join(prog_files, folder)):
            for filename in likely_exes:
                if os.path.isfile(os.path.join(prog_files, folder, filename)):
                    clustalw_exe = os.path.join(prog_files, folder, filename)
                    break
        if clustalw_exe:
            break
else:
    from subprocess import getoutput

    # Note that clustalw 1.83 and clustalw 2.1 don't obey the --version
    # command, but this does cause them to quit cleanly. Otherwise they prompt
    # the user for input (causing a lock up).
    output = getoutput("clustalw2 --version")
    # Since "not found" may be in another language, try and be sure this is
    # really the clustalw tool's output
    if "not found" not in output and "not recognized" not in output:
        if "CLUSTAL" in output and "Multiple Sequence Alignments" in output:
            clustalw_exe = "clustalw2"
    if not clustalw_exe:
        # Fall back on the clustalw 1.x binary name.
        output = getoutput("clustalw --version")
        if "not found" not in output and "not recognized" not in output:
            if "CLUSTAL" in output and "Multiple Sequence Alignments" in output:
                clustalw_exe = "clustalw"

if not clustalw_exe:
    raise MissingExternalDependencyError(
        "Install clustalw or clustalw2 if you want to use it from Biopython."
    )
class ClustalWTestCase(unittest.TestCase):
    """Class implementing common functions for ClustalW tests."""

    def setUp(self):
        # Files generated by a test are registered here so tearDown can
        # remove them even if the test fails partway through.
        self.files_to_clean = set()

    def tearDown(self):
        # Best-effort cleanup: only remove files that actually exist.
        for filename in self.files_to_clean:
            if os.path.isfile(filename):
                os.remove(filename)

    def standard_test_procedure(self, cline):
        """Shared test procedure used by all tests."""
        # Round-trip the command line object through repr/eval to verify
        # it can be reconstructed into an equivalent command string.
        self.assertEqual(str(eval(repr(cline))), str(cline))
        # Index input records by id; ":" is replaced because clustalw
        # rewrites such ids in its output (TODO confirm — assumption
        # based on the replace below matching output ids).
        input_records = SeqIO.to_dict(
            SeqIO.parse(cline.infile, "fasta"), lambda rec: rec.id.replace(":", "_")
        )  # noqa: E731

        # Determine name of tree file
        if cline.newtree:
            tree_file = cline.newtree
        else:
            # Clustalw will name it based on the input file
            tree_file = os.path.splitext(cline.infile)[0] + ".dnd"

        # Mark generated files for later removal
        self.add_file_to_clean(cline.outfile)
        self.add_file_to_clean(tree_file)

        # Run the tool; stdout should begin with the CLUSTAL banner and
        # stderr should be empty on success.
        output, error = cline()
        self.assertTrue(output.strip().startswith("CLUSTAL"))
        self.assertEqual(error.strip(), "")

        # Check the output...
        align = AlignIO.read(cline.outfile, "clustal")
        # The length of the alignment will depend on the version of clustalw
        # (clustalw 2.1 and clustalw 1.83 are certainly different).
        output_records = SeqIO.to_dict(SeqIO.parse(cline.outfile, "clustal"))
        self.assertEqual(set(input_records.keys()), set(output_records.keys()))
        # Each aligned sequence, with gaps stripped, must equal its input.
        for record in align:
            self.assertEqual(str(record.seq), str(output_records[record.id].seq))
            self.assertEqual(
                str(record.seq).replace("-", ""), str(input_records[record.id].seq)
            )

        # Check the DND file was created.
        # TODO - Try and parse this with Bio.Nexus?
        self.assertTrue(os.path.isfile(tree_file))

    def add_file_to_clean(self, filename):
        """Add a file for deferred removal by the tearDown routine."""
        self.files_to_clean.add(filename)
class ClustalWTestErrorConditions(ClustalWTestCase):
    """Test general error conditions."""

    def test_empty_file(self):
        """Test a non-existing input file."""
        input_file = "does_not_exist.fasta"
        self.assertFalse(os.path.isfile(input_file))
        cline = ClustalwCommandline(clustalw_exe, infile=input_file)

        try:
            stdout, stderr = cline()
        except ApplicationError as err:
            # Different clustalw versions word the failure differently.
            self.assertTrue(
                "Cannot open sequence file" in str(err)
                or "Cannot open input file" in str(err)
                or "Non-zero return code " in str(err),
                str(err),
            )
        else:
            self.fail("expected an ApplicationError")

    def test_single_sequence(self):
        """Test an input file containing a single sequence."""
        input_file = "Fasta/f001"
        self.assertTrue(os.path.isfile(input_file))
        self.assertEqual(len(list(SeqIO.parse(input_file, "fasta"))), 1)
        cline = ClustalwCommandline(clustalw_exe, infile=input_file)

        try:
            stdout, stderr = cline()
            # Zero return code is a possible bug in clustalw 2.1?
            self.assertIn("cannot do multiple alignment", (stdout + stderr))
        except ApplicationError:
            # Good, non-zero return code indicating an error in clustalw
            # e.g. Using clustalw 1.83 get:
            # Command 'clustalw -infile=Fasta/f001' returned non-zero exit status 4
            pass

        if os.path.isfile(input_file + ".aln"):
            # Clustalw 2.1 made an empty aln file, clustalw 1.83 did not
            self.add_file_to_clean(input_file + ".aln")

    def test_invalid_sequence(self):
        """Test an input file containing an invalid sequence."""
        input_file = "Medline/pubmed_result1.txt"
        self.assertTrue(os.path.isfile(input_file))
        cline = ClustalwCommandline(clustalw_exe, infile=input_file)

        with self.assertRaises(ApplicationError) as cm:
            stdout, stderr = cline()
            self.fail("Should have failed, returned:\n%s\n%s" % (stdout, stderr))
        err = str(cm.exception)
        # Ideally we'd catch the return code and raise the specific
        # error for "invalid format", rather than just notice there
        # is not output file.
        # Note:
        # Python 2.3 on Windows gave (0, 'Error')
        # Python 2.5 on Windows gives [Errno 0] Error
        self.assertTrue(
            "invalid format" in err
            or "not produced" in err
            or "No sequences in file" in err
            or "Non-zero return code " in err
        )
class ClustalWTestNormalConditions(ClustalWTestCase):
    """Tests for normal conditions."""

    def test_properties(self):
        """Test passing options via properties."""
        # Options are assigned one by one instead of via the constructor.
        cline = ClustalwCommandline(clustalw_exe)
        cline.infile = "Fasta/f002"
        cline.outfile = "temp_test.aln"
        cline.align = True

        self.standard_test_procedure(cline)

    def test_simple_fasta(self):
        """Test a simple fasta input file."""
        input_file = "Fasta/f002"
        output_file = "temp_test.aln"
        cline = ClustalwCommandline(
            clustalw_exe, infile=input_file, outfile=output_file
        )

        self.standard_test_procedure(cline)

    def test_newtree(self):
        """Test newtree files."""
        input_file = "Registry/seqs.fasta"
        output_file = "temp_test.aln"
        newtree_file = "temp_test.dnd"
        cline = ClustalwCommandline(
            clustalw_exe,
            infile=input_file,
            outfile=output_file,
            newtree=newtree_file,
            align=True,
        )

        self.standard_test_procedure(cline)
        # Repeat with a tree filename containing a space.
        cline.newtree = "temp with space.dnd"
        self.standard_test_procedure(cline)

    def test_large_input_file(self):
        """Test a large input file."""
        # Create a large input file by converting another example file
        # (See Bug 2804, this will produce so much output on stdout that
        # subprocess could suffer a deadlock and hang). Using all the
        # records should show the deadlock but is very slow - just thirty
        # seems to lockup on Mac OS X, even 20 on Linux (without the fix).
        input_file = "temp_cw_prot.fasta"
        records = list(SeqIO.parse("NBRF/Cw_prot.pir", "pir"))[:40]
        with open(input_file, "w") as handle:
            SeqIO.write(records, handle, "fasta")
        del records
        output_file = "temp_cw_prot.aln"
        cline = ClustalwCommandline(
            clustalw_exe, infile=input_file, outfile=output_file
        )

        # The converted input file is temporary; remove it afterwards.
        self.add_file_to_clean(input_file)
        self.standard_test_procedure(cline)

    def test_input_filename_with_space(self):
        """Test an input filename containing a space."""
        input_file = "Clustalw/temp horses.fasta"
        with open(input_file, "w") as handle:
            SeqIO.write(SeqIO.parse("Phylip/hennigian.phy", "phylip"), handle, "fasta")
        output_file = "temp with space.aln"
        cline = ClustalwCommandline(
            clustalw_exe, infile=input_file, outfile=output_file
        )

        self.add_file_to_clean(input_file)
        self.standard_test_procedure(cline)

    def test_output_filename_with_spaces(self):
        """Test an output filename containing spaces."""
        input_file = "GFF/multi.fna"
        output_file = "temp with space.aln"
        cline = ClustalwCommandline(
            clustalw_exe, infile=input_file, outfile=output_file
        )

        self.standard_test_procedure(cline)
class ClustalWTestVersionTwoSpecific(ClustalWTestCase):
    """Tests specific to ClustalW2."""

    def test_statistics(self):
        """Test a statistics file."""
        # The -stats option only exists in ClustalW2; skip otherwise.
        if clustalw_exe != "clustalw2":
            print("Skipping ClustalW2 specific test.")
            return

        in_path = "Fasta/f002"
        out_path = "temp_test.aln"
        stats_path = "temp_stats.txt"
        cline = ClustalwCommandline(
            clustalw_exe,
            infile=in_path,
            outfile=out_path,
            stats=stats_path,
        )

        self.add_file_to_clean(stats_path)
        self.standard_test_procedure(cline)
        self.assertTrue(os.path.isfile(stats_path))
if __name__ == "__main__":
    # Run all test classes in this module with verbose output.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| []
| []
| [
"LANG",
"PROGRAMFILES"
]
| [] | ["LANG", "PROGRAMFILES"] | python | 2 | 0 | |
src/python/module/smartredis/client.py | # BSD 2-Clause License
#
# Copyright (c) 2021, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import os
import os.path as osp
import numpy as np
from .dataset import Dataset
from .error import RedisConnectionError, RedisReplyError
from .smartredisPy import PyClient
from .util import Dtypes, init_default
class Client(PyClient):
    """Python client for RedisAI-enabled Redis databases.

    Thin wrapper around the pybind-generated ``PyClient`` that converts
    ``RuntimeError`` exceptions raised by the C++ layer into this
    package's ``RedisConnectionError``/``RedisReplyError`` types.
    """

    def __init__(self, address=None, cluster=False):
        """Initialize a RedisAI client

        For clusters, the address can be a single tcp/ip address and port
        of a database node. The rest of the cluster will be discovered
        by the client itself. (e.g. address="127.0.0.1:6379")

        If an address is not set, the client will look for the environment
        variable ``SSDB`` (e.g. SSDB="127.0.0.1:6379;")

        :param address: Address of the database
        :param cluster: True if connecting to a redis cluster, defaults to False
        :type cluster: bool, optional
        :raises RedisConnectionError: if connection initialization fails
        """
        if address:
            self.__set_address(address)
        if "SSDB" not in os.environ:
            raise RedisConnectionError()
        try:
            super().__init__(cluster)
        except RuntimeError as e:
            raise RedisConnectionError(str(e)) from None

    def put_tensor(self, key, data):
        """Put a tensor to a Redis database

        :param key: key for tensor to be stored at
        :type key: str
        :param data: numpy array
        :type data: np.array
        :raises TypeError: if data is not a numpy array
        :raises RedisReplyError: if put fails
        """
        if not isinstance(data, np.ndarray):
            raise TypeError("Argument provided was not a numpy array")
        # Map the numpy dtype onto the RedisAI tensor type string.
        dtype = Dtypes.tensor_from_numpy(data)
        try:
            super().put_tensor(key, dtype, data)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "put_tensor") from None

    def get_tensor(self, key):
        """Get a tensor from the database

        :param key: key to get tensor from
        :type key: str
        :raises RedisReplyError: if get fails
        :return: numpy array
        :rtype: np.array
        """
        try:
            return super().get_tensor(key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_tensor") from None

    def delete_tensor(self, key):
        """Delete a tensor within the database

        :param key: key tensor is stored at
        :type key: str
        :raises RedisReplyError: if deletion fails
        """
        try:
            super().delete_tensor(key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "delete_tensor") from None

    def copy_tensor(self, key, dest_key):
        """Copy a tensor at one key to another key

        :param key: key of tensor to be copied
        :type key: str
        :param dest_key: key to store new copy at
        :type dest_key: str
        :raises RedisReplyError: if copy operation fails
        """
        try:
            super().copy_tensor(key, dest_key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "copy_tensor") from None

    def rename_tensor(self, key, new_key):
        """Rename a tensor in the database

        :param key: key of tensor to be renamed
        :type key: str
        :param new_key: new name for the tensor
        :type new_key: str
        :raises RedisReplyError: if rename operation fails
        """
        try:
            super().rename_tensor(key, new_key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "rename_tensor") from None

    def put_dataset(self, dataset):
        """Put a Dataset instance into the database

        All associated tensors and metadata within the Dataset
        instance will also be stored.

        :param dataset: a Dataset instance
        :type dataset: Dataset
        :raises TypeError: if argument is not a Dataset
        :raises RedisReplyError: if connection fails
        """
        if not isinstance(dataset, Dataset):
            raise TypeError("Argument to put_dataset was not of type Dataset")
        try:
            super().put_dataset(dataset)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "put_dataset") from None

    def get_dataset(self, key):
        """Get a dataset from the database

        :param key: key the dataset is stored under
        :type key: str
        :raises RedisReplyError: if retrieval fails
        :return: Dataset instance
        :rtype: Dataset
        """
        try:
            dataset = super().get_dataset(key)
            return dataset
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_dataset", key=key) from None

    def delete_dataset(self, key):
        """Delete a dataset within the database

        :param key: name of the dataset
        :type key: str
        :raises RedisReplyError: if deletion fails
        """
        try:
            super().delete_dataset(key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "delete_dataset") from None

    def copy_dataset(self, key, dest_key):
        """Copy a dataset from one key to another

        :param key: name of dataset to be copied
        :type key: str
        :param dest_key: new name of dataset
        :type dest_key: str
        :raises RedisReplyError: if copy operation fails
        """
        try:
            super().copy_dataset(key, dest_key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "copy_dataset") from None

    def rename_dataset(self, key, new_key):
        """Rename a dataset in the database

        :param key: name of the dataset to be renamed
        :type key: str
        :param new_key: new name for the dataset
        :type new_key: str
        :raises RedisReplyError: if rename operation fails
        """
        try:
            super().rename_dataset(key, new_key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "rename_dataset") from None

    def set_function(self, key, function, device="CPU"):
        """Set a callable function into the database

        Function must be a callable TorchScript function and have at least
        one input and one output. Call the function with the Client.run_script
        method.
        Device selection is either "GPU" or "CPU". If many devices are
        present, a number can be passed for specification e.g. "GPU:1".

        :param key: key to store function at
        :type key: str
        :param function: callable function
        :type function: callable
        :param device: device to run function on, defaults to "CPU"
        :type device: str, optional
        :raises TypeError: if argument was not a callable function
        :raises RedisReplyError: if function failed to set
        """
        device = self.__check_device(device)
        if not callable(function):
            raise TypeError("Argument provided was not a callable function")
        # Ship the function's source code; it is stored as a script.
        fn_src = inspect.getsource(function)
        try:
            super().set_script(key, device, fn_src)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "set_function") from None

    def set_script(self, key, script, device="CPU"):
        """Store a TorchScript at key in the database

        Device selection is either "GPU" or "CPU". If many devices are
        present, a number can be passed for specification e.g. "GPU:1".

        :param key: key to store script under
        :type key: str
        :param script: TorchScript code
        :type script: str
        :param device: device for script execution, defaults to "CPU"
        :type device: str, optional
        :raises RedisReplyError: if script fails to set
        """
        device = self.__check_device(device)
        try:
            super().set_script(key, device, script)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "set_script") from None

    def set_script_from_file(self, key, file, device="CPU"):
        """Same as Client.set_script but from file

        :param key: key to store script under
        :type key: str
        :param file: path to TorchScript code
        :type file: str
        :param device: device for script execution, defaults to "CPU"
        :type device: str, optional
        :raises FileNotFoundError: if file does not exist
        :raises RedisReplyError: if script fails to set
        """
        device = self.__check_device(device)
        file_path = self.__check_file(file)
        try:
            super().set_script_from_file(key, device, file_path)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "set_script_from_file") from None

    def get_script(self, key):
        """Get a TorchScript stored in the database

        :param key: key at which script is stored
        :type key: str
        :raises RedisReplyError: if script doesn't exist
        :return: TorchScript stored at key
        :rtype: str
        """
        try:
            script = super().get_script(key)
            return script
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_script") from None

    def run_script(self, key, fn_name, inputs, outputs):
        """Execute TorchScript stored inside the database remotely

        :param key: key script is stored under
        :type key: str
        :param fn_name: name of the function within the script to execute
        :type fn_name: str
        :param inputs: list of input tensors stored in database
        :type inputs: list[str]
        :param outputs: list of output tensor names to store results under
        :type outputs: list[str]
        :raises RedisReplyError: if script execution fails
        """
        inputs, outputs = self.__check_tensor_args(inputs, outputs)
        try:
            super().run_script(key, fn_name, inputs, outputs)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "run_script") from None

    def get_model(self, key):
        """Get a stored model

        :param key: key of stored model
        :type key: str
        :raises RedisReplyError: if get fails or model doesn't exist
        :return: model
        :rtype: bytes
        """
        try:
            model = super().get_model(key)
            return model
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_model") from None

    def set_model(
        self,
        key,
        model,
        backend,
        device="CPU",
        batch_size=0,
        min_batch_size=0,
        tag="",
        inputs=None,
        outputs=None,
    ):
        """Put a TF, TF-lite, PT, or ONNX model in the database

        :param key: key to store model under
        :type key: str
        :param model: serialized model
        :type model: bytes
        :param backend: name of the backend (TORCH, TF, TFLITE, ONNX)
        :type backend: str
        :param device: name of device for execution, defaults to "CPU"
        :type device: str, optional
        :param batch_size: batch size for execution, defaults to 0
        :type batch_size: int, optional
        :param min_batch_size: minimum batch size for model execution, defaults to 0
        :type min_batch_size: int, optional
        :param tag: additional tag for model information, defaults to ""
        :type tag: str, optional
        :param inputs: model inputs (TF only), defaults to None
        :type inputs: list[str], optional
        :param outputs: model outputs (TF only), defaults to None
        :type outputs: list[str], optional
        :raises RedisReplyError: if model fails to set
        """
        device = self.__check_device(device)
        backend = self.__check_backend(backend)
        inputs, outputs = self.__check_tensor_args(inputs, outputs)
        try:
            super().set_model(
                key,
                model,
                backend,
                device,
                batch_size,
                min_batch_size,
                tag,
                inputs,
                outputs,
            )
        except RuntimeError as e:
            raise RedisReplyError(str(e), "set_model") from None

    def set_model_from_file(
        self,
        key,
        model_file,
        backend,
        device="CPU",
        batch_size=0,
        min_batch_size=0,
        tag="",
        inputs=None,
        outputs=None,
    ):
        """Put a TF, TF-lite, PT, or ONNX model from file in the database

        :param key: key to store model under
        :type key: str
        :param model_file: file path to serialized model
        :type model_file: str
        :param backend: name of the backend (TORCH, TF, TFLITE, ONNX)
        :type backend: str
        :param device: name of device for execution, defaults to "CPU"
        :type device: str, optional
        :param batch_size: batch size for execution, defaults to 0
        :type batch_size: int, optional
        :param min_batch_size: minimum batch size for model execution, defaults to 0
        :type min_batch_size: int, optional
        :param tag: additional tag for model information, defaults to ""
        :type tag: str, optional
        :param inputs: model inputs (TF only), defaults to None
        :type inputs: list[str], optional
        :param outputs: model outputs (TF only), defaults to None
        :type outputs: list[str], optional
        :raises FileNotFoundError: if model_file does not exist
        :raises RedisReplyError: if model fails to set
        """
        device = self.__check_device(device)
        backend = self.__check_backend(backend)
        m_file = self.__check_file(model_file)
        inputs, outputs = self.__check_tensor_args(inputs, outputs)
        try:
            super().set_model_from_file(
                key,
                m_file,
                backend,
                device,
                batch_size,
                min_batch_size,
                tag,
                inputs,
                outputs,
            )
        except RuntimeError as e:
            raise RedisReplyError(str(e), "set_model_from_file") from None

    def run_model(self, key, inputs=None, outputs=None):
        """Execute a stored model

        :param key: key for stored model
        :type key: str
        :param inputs: keys of stored inputs to provide model, defaults to None
        :type inputs: list[str], optional
        :param outputs: keys to store outputs under, defaults to None
        :type outputs: list[str], optional
        :raises RedisReplyError: if model execution fails
        """
        inputs, outputs = self.__check_tensor_args(inputs, outputs)
        try:
            super().run_model(key, inputs, outputs)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "run_model") from None

    def tensor_exists(self, name):
        """Check if a tensor or dataset exists in the database

        The key associated to the entity will be
        computed internally based on the current prefix behavior.

        :param name: The tensor or dataset name that will be checked in the database
        :type name: str
        :returns: Returns true if the tensor or dataset exists in the database
        :rtype: bool
        :raises RedisReplyError: if `tensor_exists` fails (i.e. causes an error)
        """
        try:
            return super().tensor_exists(name)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "tensor_exists") from None

    def model_exists(self, name):
        """Check if a model or script exists in the database

        The key associated to the entity will be
        computed internally based on the current prefix behavior.

        :param name: The model or script name that will be checked in the database
        :type name: str
        :returns: Returns true if the model exists in the database
        :rtype: bool
        :raises RedisReplyError: if `model_exists` fails (i.e. causes an error)
        """
        try:
            return super().model_exists(name)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "model_exists") from None

    def key_exists(self, key):
        """Check if the key exists in the database

        :param key: The key that will be checked in the database
        :type key: str
        :returns: Returns true if the key exists in the database
        :rtype: bool
        :raises RedisReplyError: if `key_exists` fails
        """
        try:
            return super().key_exists(key)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "key_exists") from None

    def poll_key(self, key, poll_frequency_ms, num_tries):
        """Check if the key exists in the database

        The check is performed repeatedly at a
        specified frequency for a specified number
        of times.

        :param key: The key that will be checked in the database
        :type key: str
        :param poll_frequency_ms: The frequency of checks for the
                                  key in milliseconds
        :type poll_frequency_ms: int
        :param num_tries: The total number of times to check for
                          the specified number of keys. If the
                          value is set to -1, the key will be
                          polled indefinitely.
        :type num_tries: int
        :returns: Returns true if the key is found within the
                  specified number of tries, otherwise false.
        :rtype: bool
        :raises RedisReplyError: if key poll fails
        """
        try:
            return super().poll_key(key, poll_frequency_ms, num_tries)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "poll_key") from None

    def poll_tensor(self, name, poll_frequency_ms, num_tries):
        """Check if a tensor or dataset exists in the database

        The check will be performed at a
        specified frequency for a specified number
        of times. The key associated to the entity will be
        computed internally based on the current prefix behavior.

        :param name: The tensor or dataset name checked in the database
        :type name: str
        :param poll_frequency_ms: The frequency of checks for the
                                  key in milliseconds
        :type poll_frequency_ms: int
        :param num_tries: The total number of times to check for
                          the specified number of keys. If the
                          value is set to -1, the key will be
                          polled indefinitely.
        :type num_tries: int
        :returns: Returns true if the key is found within the
                  specified number of tries, otherwise false.
        :rtype: bool
        :raises RedisReplyError: if `poll_tensor` fails
        """
        try:
            return super().poll_tensor(name, poll_frequency_ms, num_tries)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "poll_tensor") from None

    def poll_model(self, name, poll_frequency_ms, num_tries):
        """Check if a model or script exists in the database

        The check will be performed at a
        specified frequency for a specified number
        of times. The key associated to the entity will be
        computed internally based on the current prefix behavior.

        :param name: The model or script name checked in the database
        :type name: str
        :param poll_frequency_ms: The frequency of checks for the
                                  key in milliseconds
        :type poll_frequency_ms: int
        :param num_tries: The total number of times to check for
                          the specified number of keys. If the
                          value is set to -1, the key will be
                          polled indefinitely.
        :type num_tries: int
        :returns: Returns true if the key is found within the
                  specified number of tries, otherwise false.
        :rtype: bool
        :raises RedisReplyError: if `poll_model` fails
        """
        try:
            return super().poll_model(name, poll_frequency_ms, num_tries)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "poll_model") from None

    def set_data_source(self, source_id):
        """Set the data source (i.e. key prefix for get functions)

        :param source_id: The prefix for retrieval commands
        :type source_id: str
        :raises RedisReplyError: if setting the data source fails
        """
        try:
            return super().set_data_source(source_id)
        except RuntimeError as e:
            # Fixed: was incorrectly re-raised as a bare RuntimeError,
            # breaking the class-wide error contract.
            raise RedisReplyError(str(e), "set_data_source") from None

    def use_model_ensemble_prefix(self, use_prefix):
        """Set whether model and script keys should be prefixed

        This function can be used to avoid key collisions in an ensemble.
        Prefixes will only be used if they were previously set through
        environment variables SSKEYIN and SSKEYOUT.
        By default, the client does not prefix model and script
        keys.

        :param use_prefix: If set to true, all future operations
                           on models and scripts will use a prefix, if
                           available.
        :type use_prefix: bool
        :raises RedisReplyError: if the operation fails
        """
        try:
            return super().use_model_ensemble_prefix(use_prefix)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "use_model_ensemble_prefix") from None

    def use_tensor_ensemble_prefix(self, use_prefix):
        """Set whether tensor and dataset keys should be prefixed

        This function can be used to avoid key collisions in an ensemble.
        Prefixes will only be used if they were previously set through
        environment variables SSKEYIN and SSKEYOUT.
        By default, the client prefixes tensor and dataset
        keys when a prefix is available.

        :param use_prefix: If set to true, all future operations
                           on tensors and datasets will use a prefix, if
                           available.
        :type use_prefix: bool
        :raises RedisReplyError: if the operation fails
        """
        try:
            return super().use_tensor_ensemble_prefix(use_prefix)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "use_tensor_ensemble_prefix") from None

    def get_db_node_info(self, addresses):
        """Returns information about given database nodes

        :param addresses: The addresses of the database nodes
        :type addresses: list[str]
        :returns: A list of dictionaries with each entry in the
                  list corresponding to an address reply
        :rtype: list[dict]
        :raises RedisReplyError: if there is an error in
                                 command execution or the address
                                 is not reachable by the client.

        In the case of using a cluster of database nodes,
        it is best practice to bind each node in the cluster
        to a specific address to avoid inconsistencies in
        addresses retrieved with the CLUSTER SLOTS command.
        Inconsistencies in node addresses across
        CLUSTER SLOTS commands will lead to RedisReplyError
        being thrown.
        """
        try:
            return super().get_db_node_info(addresses)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_db_node_info") from None

    def get_db_cluster_info(self, addresses):
        """Returns cluster information from a specified db node.

        If the address does not correspond to a cluster node,
        an empty dictionary is returned.

        :param addresses: The addresses of the database nodes
        :type addresses: list[str]
        :returns: A list of dictionaries with each entry in the
                  list corresponding to an address reply
        :rtype: list[dict]
        :raises RedisReplyError: if there is an error in
                                 command execution or the address
                                 is not reachable by the client.

        In the case of using a cluster of database nodes,
        it is best practice to bind each node in the cluster
        to a specific address to avoid inconsistencies in
        addresses retrieved with the CLUSTER SLOTS command.
        Inconsistencies in node addresses across
        CLUSTER SLOTS commands will lead to RedisReplyError
        being thrown.
        """
        try:
            return super().get_db_cluster_info(addresses)
        except RuntimeError as e:
            raise RedisReplyError(str(e), "get_db_cluster_info") from None

    # ---- helpers --------------------------------------------------------

    @staticmethod
    def __check_tensor_args(inputs, outputs):
        # Normalize None/str inputs to lists so the C++ layer always
        # receives list arguments.
        inputs = init_default([], inputs, (list, str))
        outputs = init_default([], outputs, (list, str))
        if isinstance(inputs, str):
            inputs = [inputs]
        if isinstance(outputs, str):
            outputs = [outputs]
        return inputs, outputs

    @staticmethod
    def __check_backend(backend):
        # Backends are matched case-insensitively.
        backend = backend.upper()
        if backend in ["TF", "TFLITE", "TORCH", "ONNX"]:
            return backend
        else:
            raise TypeError(f"Backend type {backend} unsupported")

    @staticmethod
    def __check_file(file):
        # Resolve to an absolute path and verify the file exists.
        file_path = osp.abspath(file)
        if not osp.isfile(file_path):
            raise FileNotFoundError(file_path)
        return file_path

    @staticmethod
    def __check_device(device):
        # Accept "CPU"/"GPU" plus optional index suffix, e.g. "GPU:1".
        device = device.upper()
        if not device.startswith("CPU") and not device.startswith("GPU"):
            raise TypeError("Device argument must start with either CPU or GPU")
        return device

    @staticmethod
    def __set_address(address):
        # Export SSDB so the C++ client can discover the database address.
        if "SSDB" in os.environ:
            del os.environ["SSDB"]
        os.environ["SSDB"] = address
| []
| []
| [
"SSDB"
]
| [] | ["SSDB"] | python | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/sibcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """Parse 'po' format produced by xgettext.

    Accepts either ``str`` or ``bytes`` (``Popen.communicate()`` returns
    bytes on Python 3 unless text mode is requested), so callers may pass
    the xgettext output directly.

    Return a list of (msgid, msgstr) tuples, where each element is the
    list of quoted source lines making up that entry.
    """
    if isinstance(text, bytes):
        # xgettext output; PO files default to UTF-8.
        text = text.decode('utf-8')
    messages = []
    msgid = []
    msgstr = []
    in_msgid = False
    in_msgstr = False
    for line in text.split('\n'):
        line = line.rstrip('\r')
        if line.startswith('msgid '):
            # A new entry begins; flush the previous one if complete.
            if in_msgstr:
                messages.append((msgid, msgstr))
                in_msgstr = False
            # message start
            in_msgid = True
            msgid = [line[6:]]
        elif line.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            msgstr = [line[7:]]
        elif line.startswith('"'):
            # Continuation line belongs to whichever section is open.
            if in_msgid:
                msgid.append(line)
            if in_msgstr:
                msgstr.append(line)
    if in_msgstr:
        # Flush the final entry (no trailing msgid follows it).
        messages.append((msgid, msgstr))
    return messages
files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT, '--output=-', '-n', '--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# Popen.communicate() yields bytes on Python 3; decode before parsing.
if isinstance(out, bytes):
    out = out.decode('utf-8')
messages = parse_po(out)

# Sort entries by msgid for a stable, diff-friendly output file.
messages.sort(key=operator.itemgetter(0))

# Use a context manager so the output file is closed even on error.
with open(OUT_CPP, 'w') as f:
    f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
    f.write('static const char UNUSED *dash_strings[] = {\n')
    for (msgid, msgstr) in messages:
        if msgid != EMPTY:
            f.write('QT_TRANSLATE_NOOP("sibcoin-core", %s),\n' % ('\n'.join(msgid)))
    f.write('};\n')
| []
| []
| [
"XGETTEXT"
]
| [] | ["XGETTEXT"] | python | 1 | 0 | |
server/server.go | /*
* Copyright 2017-2019 Kopano and its licensors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package server
import (
"context"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/gorilla/mux"
"github.com/longsleep/go-metrics/loggedwriter"
"github.com/longsleep/go-metrics/timing"
"github.com/sirupsen/logrus"
)
// Server is our HTTP server implementation.
type Server struct {
	Config *Config // Bundled configuration (listen address, logger, routes, fallback handler).

	listenAddr string             // TCP address the HTTP listener binds to.
	logger     logrus.FieldLogger // Structured logger for server and request logging.

	requestLog bool // True when KOPANO_DEBUG_SERVER_REQUEST_LOG=1; enables per-request timing logs.
}
// NewServer constructs a server from the provided parameters.
func NewServer(c *Config) (*Server, error) {
	// Per-request logging is opt-in via environment variable.
	debugRequestLog := os.Getenv("KOPANO_DEBUG_SERVER_REQUEST_LOG") == "1"

	server := &Server{
		Config: c,

		listenAddr: c.Config.ListenAddr,
		logger:     c.Config.Logger,

		requestLog: debugRequestLog,
	}

	return server, nil
}
// AddContext wraps next so that every request runs with a context derived
// from parent. The per-request context is cancelled once the handler
// returns. When request logging is enabled (requestLog), each request is
// additionally timed and a debug log line is emitted on completion.
func (s *Server) AddContext(parent context.Context, next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Create per request context.
		ctx, cancel := context.WithCancel(parent)

		if s.requestLog {
			// Wrap the writer so the final status code can be logged.
			loggedWriter := metrics.NewLoggedResponseWriter(rw)
			// Attach timing to the request context; the callback fires
			// when timing stops, receiving the total duration.
			ctx = timing.NewContext(ctx, func(duration time.Duration) {
				// This is the stop callback, called when complete with duration.
				durationMs := float64(duration) / float64(time.Millisecond)
				// Log request.
				s.logger.WithFields(logrus.Fields{
					"status":     loggedWriter.Status(),
					"method":     req.Method,
					"path":       req.URL.Path,
					"remote":     req.RemoteAddr,
					"duration":   durationMs,
					"referer":    req.Referer(),
					"user-agent": req.UserAgent(),
					"origin":     req.Header.Get("Origin"),
				}).Debug("HTTP request complete")
			})
			rw = loggedWriter
		}

		// Run the request.
		next.ServeHTTP(rw, req.WithContext(ctx))

		// Cancel per request context when done.
		cancel()
	})
}
// AddRoutes registers this server's URL routes on router, passing ctx on to
// every configured route. A configured delegate handler receives all
// requests that match no registered route.
func (s *Server) AddRoutes(ctx context.Context, router *mux.Router) {
	// TODO(longsleep): Add subpath support to all handlers and paths.
	router.HandleFunc("/health-check", s.HealthCheckHandler)
	for _, r := range s.Config.Routes {
		r.AddRoutes(ctx, router)
	}
	if handler := s.Config.Handler; handler != nil {
		// Delegate rest to provider which is also a handler.
		router.NotFoundHandler = handler
	}
}
// Serve starts all the associated servers resources and listeners and blocks
// forever until signals or error occurs. Returns error and gracefully stops
// all HTTP listeners before return.
func (s *Server) Serve(ctx context.Context) error {
	serveCtx, serveCtxCancel := context.WithCancel(ctx)
	defer serveCtxCancel()

	logger := s.logger

	errCh := make(chan error, 2)
	exitCh := make(chan bool, 1)
	// The signal channel must be buffered: signal.Notify does not block when
	// sending and an unbuffered channel can miss a signal delivered before
	// the receiver is ready (flagged by go vet / staticcheck SA1017).
	signalCh := make(chan os.Signal, 1)

	router := mux.NewRouter()
	s.AddRoutes(serveCtx, router)

	// HTTP listener.
	srv := &http.Server{
		Handler: s.AddContext(serveCtx, router),
	}

	logger.WithField("listenAddr", s.listenAddr).Infoln("starting http listener")
	listener, err := net.Listen("tcp", s.listenAddr)
	if err != nil {
		return err
	}

	logger.Infoln("ready to handle requests")

	go func() {
		serveErr := srv.Serve(listener)
		if serveErr != nil {
			errCh <- serveErr
		}
		logger.Debugln("http listener stopped")
		close(exitCh)
	}()

	// Wait for exit or error.
	signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)
	select {
	case err = <-errCh:
		// breaks
	case reason := <-signalCh:
		logger.WithField("signal", reason).Warnln("received signal")
		// breaks
	}

	// Shutdown, server will stop to accept new connections, requires Go 1.8+.
	logger.Infoln("clean server shutdown start")
	shutDownCtx, shutDownCtxCancel := context.WithTimeout(ctx, 10*time.Second)
	defer shutDownCtxCancel() // release context resources on every return path.
	if shutdownErr := srv.Shutdown(shutDownCtx); shutdownErr != nil {
		logger.WithError(shutdownErr).Warn("clean server shutdown failed")
	}

	// Cancel our own context, wait on the HTTP listener goroutine.
	serveCtxCancel()
	func() {
		for {
			select {
			case <-exitCh:
				return
			default:
				// HTTP listener has not quit yet.
				logger.Info("waiting for http listener to exit")
			}
			select {
			case reason := <-signalCh:
				// A second signal aborts the graceful wait.
				logger.WithField("signal", reason).Warn("received signal")
				return
			case <-time.After(100 * time.Millisecond):
			}
		}
	}()

	return err
}
| [
"\"KOPANO_DEBUG_SERVER_REQUEST_LOG\""
]
| []
| [
"KOPANO_DEBUG_SERVER_REQUEST_LOG"
]
| [] | ["KOPANO_DEBUG_SERVER_REQUEST_LOG"] | go | 1 | 0 | |
awsmfa/__init__.py | import argparse
try:
import configparser
from configparser import NoOptionError, NoSectionError
except ImportError:
import ConfigParser as configparser
from ConfigParser import NoOptionError, NoSectionError
import datetime
import getpass
import logging
import os
import sys
import boto3
from botocore.exceptions import ClientError, ParamValidationError
from config import initial_setup
from util import log_error_and_exit, prompter
# Module-level logger; handlers/levels are attached later by setup_logger().
logger = logging.getLogger('aws-mfa')
# Default location of the AWS shared credentials file.
AWS_CREDS_PATH = '%s/.aws/credentials' % (os.path.expanduser('~'),)
def main():
    """Entry point: parse CLI arguments, ensure the credentials file exists,
    then either run the interactive setup or validate/refresh credentials."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--device',
                        required=False,
                        metavar='arn:aws:iam::123456788990:mfa/dudeman',
                        help="The MFA Device ARN. This value can also be "
                        "provided via the environment variable 'MFA_DEVICE' or"
                        " the ~/.aws/credentials variable 'aws_mfa_device'.")
    parser.add_argument('--duration',
                        type=int,
                        help="The duration, in seconds, that the temporary "
                             "credentials should remain valid. Minimum value: "
                             "900 (15 minutes). Maximum: 129600 (36 hours). "
                             "Defaults to 43200 (12 hours), or 3600 (one "
                             "hour) when using '--assume-role'. This value "
                             "can also be provided via the environment "
                             "variable 'MFA_STS_DURATION'. ")
    parser.add_argument('--profile',
                        help="If using profiles, specify the name here. The "
                             "default profile name is 'default'. The value can "
                             "also be provided via the environment variable "
                             "'AWS_PROFILE'.",
                        required=False)
    parser.add_argument('--long-term-suffix', '--long-suffix',
                        help="The suffix appended to the profile name to"
                             "identify the long term credential section",
                        required=False)
    parser.add_argument('--short-term-suffix', '--short-suffix',
                        help="The suffix appended to the profile name to"
                             "identify the short term credential section",
                        required=False)
    parser.add_argument('--assume-role', '--assume',
                        metavar='arn:aws:iam::123456788990:role/RoleName',
                        help="The ARN of the AWS IAM Role you would like to "
                             "assume, if specified. This value can also be provided"
                             " via the environment variable 'MFA_ASSUME_ROLE'",
                        required=False)
    parser.add_argument('--role-session-name',
                        help="Friendly session name required when using "
                             "--assume-role",
                        default=getpass.getuser(),
                        required=False)
    parser.add_argument('--force',
                        help="Refresh credentials even if currently valid.",
                        action="store_true",
                        required=False)
    parser.add_argument('--log-level',
                        help="Set log level",
                        choices=[
                            'CRITICAL', 'ERROR', 'WARNING',
                            'INFO', 'DEBUG', 'NOTSET'
                        ],
                        required=False,
                        default='DEBUG')
    parser.add_argument('--setup',
                        help="Setup a new log term credentials section",
                        action="store_true",
                        required=False)
    args = parser.parse_args()

    level = getattr(logging, args.log_level)
    setup_logger(level)

    # Offer to create the credentials file on first use instead of failing.
    if not os.path.isfile(AWS_CREDS_PATH):
        console_input = prompter()
        create = console_input("Could not locate credentials file at {}, "
                               "would you like to create one? "
                               "[y/n]".format(AWS_CREDS_PATH))
        if create.lower() == "y":
            with open(AWS_CREDS_PATH, 'a'):
                pass
        else:
            log_error_and_exit(logger, 'Could not locate credentials file at '
                               '%s' % (AWS_CREDS_PATH,))

    config = get_config(AWS_CREDS_PATH)

    if args.setup:
        # Interactive one-time setup of a long-term credentials section.
        initial_setup(logger, config, AWS_CREDS_PATH)
        return

    validate(args, config)
def get_config(aws_creds_path):
    """Read the AWS credentials file and return it as a RawConfigParser.

    Exits the process with an error message when the file cannot be parsed.
    """
    parsed = configparser.RawConfigParser()
    try:
        parsed.read(aws_creds_path)
    except configparser.ParsingError as parse_err:
        log_error_and_exit(logger, "There was a problem reading or parsing "
                                   "your credentials file: %s" % (parse_err.args[0],))
    return parsed
def validate(args, config):
    """Resolve effective settings (profile, device, role, duration) from CLI
    args, environment variables and the credentials file, decide whether the
    short-term credentials need refreshing, and refresh them if so.

    Exits the process via log_error_and_exit() on unrecoverable problems.
    """
    # Profile: CLI flag > AWS_PROFILE env var > 'default'.
    if not args.profile:
        if os.environ.get('AWS_PROFILE'):
            args.profile = os.environ.get('AWS_PROFILE')
        else:
            args.profile = 'default'

    # Section name holding the long-term (permanent) credentials.
    if not args.long_term_suffix:
        long_term_name = '%s-long-term' % (args.profile,)
    elif args.long_term_suffix.lower() == 'none':
        long_term_name = args.profile
    else:
        long_term_name = '%s-%s' % (args.profile, args.long_term_suffix)

    # Section name that will receive the short-term (temporary) credentials.
    if not args.short_term_suffix or args.short_term_suffix.lower() == 'none':
        short_term_name = args.profile
    else:
        short_term_name = '%s-%s' % (args.profile, args.short_term_suffix)

    if long_term_name == short_term_name:
        log_error_and_exit(logger,
                           "The value for '--long-term-suffix' cannot "
                           "be equal to the value for '--short-term-suffix'")

    if args.assume_role:
        role_msg = "with assumed role: %s" % (args.assume_role,)
    elif config.has_option(args.profile, 'assumed_role_arn'):
        role_msg = "with assumed role: %s" % (
            config.get(args.profile, 'assumed_role_arn'))
    else:
        role_msg = ""
    logger.info('Validating credentials for profile: %s %s' %
                (short_term_name, role_msg))
    reup_message = "Obtaining credentials for a new role or profile."

    try:
        key_id = config.get(long_term_name, 'aws_access_key_id')
        access_key = config.get(long_term_name, 'aws_secret_access_key')
    except NoSectionError:
        log_error_and_exit(logger,
                           "Long term credentials session '[%s]' is missing. "
                           "You must add this section to your credentials file "
                           "along with your long term 'aws_access_key_id' and "
                           "'aws_secret_access_key'" % (long_term_name,))
    except NoOptionError as e:
        log_error_and_exit(logger, e)

    # get device from param, env var or config
    if not args.device:
        if os.environ.get('MFA_DEVICE'):
            args.device = os.environ.get('MFA_DEVICE')
        elif config.has_option(long_term_name, 'aws_mfa_device'):
            args.device = config.get(long_term_name, 'aws_mfa_device')
        else:
            log_error_and_exit(logger,
                               'You must provide --device or MFA_DEVICE or set '
                               '"aws_mfa_device" in ".aws/credentials"')

    # get assume_role from param or env var
    if not args.assume_role:
        if os.environ.get('MFA_ASSUME_ROLE'):
            args.assume_role = os.environ.get('MFA_ASSUME_ROLE')
        elif config.has_option(long_term_name, 'assume_role'):
            args.assume_role = config.get(long_term_name, 'assume_role')

    # get duration from param, env var or set default
    if not args.duration:
        if os.environ.get('MFA_STS_DURATION'):
            args.duration = int(os.environ.get('MFA_STS_DURATION'))
        else:
            args.duration = 3600 if args.assume_role else 43200

    # If this is False, only refresh credentials if expired. Otherwise
    # always refresh.
    force_refresh = False

    # Validate presence of short-term section
    if not config.has_section(short_term_name):
        logger.info("Short term credentials section %s is missing, "
                    "obtaining new credentials." % (short_term_name,))
        if short_term_name == 'default':
            try:
                config.add_section(short_term_name)
            # a hack for creating a section named "default"
            except ValueError:
                configparser.DEFAULTSECT = short_term_name
                config.set(short_term_name, 'CREATE', 'TEST')
                config.remove_option(short_term_name, 'CREATE')
        else:
            config.add_section(short_term_name)
        force_refresh = True
    # Validate option integrity of short-term section
    else:
        required_options = ['assumed_role',
                            'aws_access_key_id', 'aws_secret_access_key',
                            'aws_session_token', 'aws_security_token',
                            'expiration']
        try:
            short_term = {}
            for option in required_options:
                short_term[option] = config.get(short_term_name, option)
        except NoOptionError:
            # logger.warn is a deprecated alias; use warning instead.
            logger.warning("Your existing credentials are missing or invalid, "
                           "obtaining new credentials.")
            force_refresh = True

    try:
        current_role = config.get(short_term_name, 'assumed_role_arn')
    except NoOptionError:
        current_role = None

    if args.force:
        logger.info("Forcing refresh of credentials.")
        force_refresh = True
    # There are not credentials for an assumed role,
    # but the user is trying to assume one
    elif current_role is None and args.assume_role:
        logger.info(reup_message)
        force_refresh = True
    # There are current credentials for a role and
    # the role arn being provided is the same.
    elif (current_role is not None and
            args.assume_role and current_role == args.assume_role):
        pass
    # There are credentials for a current role and the role
    # that is attempting to be assumed is different
    elif (current_role is not None and
            args.assume_role and current_role != args.assume_role):
        logger.info(reup_message)
        force_refresh = True
    # There are credentials for a current role and no role arn is
    # being supplied
    elif current_role is not None and args.assume_role is None:
        logger.info(reup_message)
        force_refresh = True

    should_refresh = True

    # Unless we're forcing a refresh, check expiration.
    if not force_refresh:
        exp = datetime.datetime.strptime(
            config.get(short_term_name, 'expiration'), '%Y-%m-%d %H:%M:%S')
        diff = exp - datetime.datetime.utcnow()
        if diff.total_seconds() <= 0:
            logger.info("Your credentials have expired, renewing.")
        else:
            should_refresh = False
            logger.info(
                "Your credentials are still valid for %s seconds"
                " they will expire at %s"
                % (diff.total_seconds(), exp))

    if should_refresh:
        get_credentials(short_term_name, key_id, access_key, args, config)
def get_credentials(short_term_name, lt_key_id, lt_access_key, args, config):
    """Prompt for an MFA token, call STS (assume-role or session token),
    write the returned temporary credentials into the short-term section of
    the credentials file and exit the process with status 0.

    Exits with an error via log_error_and_exit() on STS failures or an
    invalid token.
    """
    console_input = prompter()
    mfa_token = console_input('Enter AWS MFA code for device [%s] '
                              '(renewing for %s seconds):' %
                              (args.device, args.duration))
    # STS client authenticated with the long-term credentials.
    client = boto3.client(
        'sts',
        aws_access_key_id=lt_key_id,
        aws_secret_access_key=lt_access_key
    )
    if args.assume_role:

        logger.info("Assuming Role - Profile: %s, Role: %s, Duration: %s",
                    short_term_name, args.assume_role, args.duration)
        if args.role_session_name is None:
            log_error_and_exit(logger, "You must specify a role session name "
                               "via --role-session-name")

        try:
            response = client.assume_role(
                RoleArn=args.assume_role,
                RoleSessionName=args.role_session_name,
                DurationSeconds=args.duration,
                SerialNumber=args.device,
                TokenCode=mfa_token
            )
        except ClientError as e:
            log_error_and_exit(logger,
                               "An error occured while calling "
                               "assume role: {}".format(e))
        except ParamValidationError:
            log_error_and_exit(logger, "Token must be six digits")

        # Record that these short-term credentials belong to an assumed role.
        config.set(
            short_term_name,
            'assumed_role',
            'True',
        )
        config.set(
            short_term_name,
            'assumed_role_arn',
            args.assume_role,
        )
    else:
        logger.info("Fetching Credentials - Profile: %s, Duration: %s",
                    short_term_name, args.duration)
        try:
            response = client.get_session_token(
                DurationSeconds=args.duration,
                SerialNumber=args.device,
                TokenCode=mfa_token
            )
        except ClientError as e:
            log_error_and_exit(
                logger,
                "An error occured while calling assume role: {}".format(e))
        except ParamValidationError:
            log_error_and_exit(
                logger,
                "Token must be six digits")

        config.set(
            short_term_name,
            'assumed_role',
            'False',
        )
        # Drop a stale role marker left over from a previous assume-role run.
        config.remove_option(short_term_name, 'assumed_role_arn')

    # aws_session_token and aws_security_token are both added
    # to support boto and boto3
    options = [
        ('aws_access_key_id', 'AccessKeyId'),
        ('aws_secret_access_key', 'SecretAccessKey'),
        ('aws_session_token', 'SessionToken'),
        ('aws_security_token', 'SessionToken'),
    ]

    for option, value in options:
        config.set(
            short_term_name,
            option,
            response['Credentials'][value]
        )
    # Save expiration individiually, so it can be manipulated
    config.set(
        short_term_name,
        'expiration',
        response['Credentials']['Expiration'].strftime('%Y-%m-%d %H:%M:%S')
    )
    with open(AWS_CREDS_PATH, 'w') as configfile:
        config.write(configfile)
    logger.info(
        "Success! Your credentials will expire in %s seconds at: %s"
        % (args.duration, response['Credentials']['Expiration']))
    # Terminate the process on success; callers do not continue past here.
    sys.exit(0)
def setup_logger(level=logging.DEBUG):
    """Attach a stdout stream handler at the given level to the module logger."""
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| []
| []
| [
"AWS_PROFILE",
"MFA_ASSUME_ROLE",
"MFA_DEVICE",
"MFA_STS_DURATION"
]
| [] | ["AWS_PROFILE", "MFA_ASSUME_ROLE", "MFA_DEVICE", "MFA_STS_DURATION"] | python | 4 | 0 | |
cmd/hypercloud-installer/app/container/config_service.go | // Copyright 2019 Axel Etcheverry. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package container
import (
"flag"
"os"
"strings"
"time"
service "github.com/euskadi31/go-service"
"github.com/hyperscale/hypercloud/cmd/hypercloud-installer/app/config"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
)
// Services keys
const (
	// ConfigKey is the dependency-injection container key under which the
	// application configuration (*config.Configuration) is registered.
	ConfigKey = "service.config"
)

// name is the application name; it drives the logger prefix, the config
// search paths and the environment variable prefix.
const name = "hypercloud-installer"
// init registers the configuration factory in the service container. The
// factory parses the --config flag, sets viper defaults, merges an optional
// config file and environment variables (prefix HYPERCLOUD_INSTALLER), and
// unmarshals the result into a *config.Configuration.
func init() {
	service.Set(ConfigKey, func(c service.Container) interface{} {
		cmd := c.Get(FlagsKey).(*flag.FlagSet)
		var cfgFile string
		cmd.StringVar(&cfgFile, "config", "", "config file (default is $HOME/config.yaml)")
		// Ignore errors; cmd is set for ExitOnError.
		// nolint:gosec
		_ = cmd.Parse(os.Args[1:])
		cfg := config.NewConfiguration()
		options := viper.New()
		// Baseline defaults; any of these can be overridden by file or env.
		options.SetDefault("logger.level", "info")
		options.SetDefault("logger.prefix", name)
		options.SetDefault("server.http.host", "")
		options.SetDefault("server.http.port", 8080)
		options.SetDefault("server.profiling", true)
		options.SetDefault("server.metrics", true)
		options.SetDefault("server.healthcheck", true)
		options.SetDefault("server.shutdown_timeout", 10*time.Second)
		options.SetDefault("server.write_timeout", 0)
		options.SetDefault("server.read_timeout", 10*time.Second)
		options.SetDefault("server.read_header_timeout", 10*time.Millisecond)
		options.SetConfigName("config") // name of config file (without extension)
		options.AddConfigPath("/etc/" + name + "/") // path to look for the config file in
		options.AddConfigPath("$HOME/." + name + "/") // call multiple times to add many search paths
		options.AddConfigPath(".")
		if cfgFile != "" { // enable ability to specify config file via flag
			options.SetConfigFile(cfgFile)
		}
		// Heroku-style PORT support: map it onto the namespaced variable.
		// NOTE(review): the os.Setenv error is ignored here — confirm this
		// is acceptable before changing.
		if port := os.Getenv("PORT"); port != "" {
			os.Setenv("HYPERCLOUD_INSTALLER_SERVER_HTTP_PORT", port)
		}
		options.SetEnvPrefix("HYPERCLOUD_INSTALLER")
		options.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
		options.AutomaticEnv() // read in environment variables that match
		// If a config file is found, read it in.
		if err := options.ReadInConfig(); err == nil {
			log.Info().Msgf("Using config file: %s", options.ConfigFileUsed())
		}
		if err := options.Unmarshal(cfg); err != nil {
			log.Fatal().Err(err).Msg(ConfigKey)
		}
		return cfg // *config.Configuration
	})
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
resolution_cards/thegamecrafter/core.py | #! /usr/bin/env python
import os
import requests
base_url="https://www.thegamecrafter.com/api"
session = None
def post(endpoint, files=None, **kwargs):
    """POST to The Game Crafter API.

    Requires a prior login() (module-level ``session`` must be set). Extra
    keyword arguments become request parameters; ``files`` is passed through
    to requests for multipart uploads. Returns the ``result`` payload of the
    JSON response; raises on any non-2xx status.
    """
    if session is None:
        raise Exception('Must be logged in before post() can be called')
    url = base_url
    # Ensure exactly one separator between base URL and endpoint.
    if not endpoint.startswith('/'):
        url += '/'
    url += endpoint
    params = kwargs
    params['session_id'] = session['id']
    print 'POST', url, params.keys()
    response = requests.post(url, params=params, files=files)
    # Any status outside 2xx is treated as a failure.
    if not str(response.status_code).startswith('2'):
        print 'FAIL', response
        print 'FAIL', response.json()
        raise Exception('Request failed')
    return response.json()['result']
def get(endpoint, **kwargs):
    """GET from The Game Crafter API.

    Requires a prior login() (module-level ``session`` must be set). Extra
    keyword arguments become query parameters. Returns the ``result`` payload
    of the JSON response; raises on any non-2xx status.
    """
    if session is None:
        raise Exception('Must be logged in before get() can be called')
    url = base_url
    # Ensure exactly one separator between base URL and endpoint.
    if not endpoint.startswith('/'):
        url += '/'
    url += endpoint
    params = kwargs
    params['session_id'] = session['id']
    print 'GET', url, params.keys()
    response = requests.get(url, params=params)
    # Any status outside 2xx is treated as a failure.
    if not str(response.status_code).startswith('2'):
        print 'FAIL', response
        print 'FAIL', response.json()
        raise Exception('Request failed')
    return response.json()['result']
def login():
    """Open an API session using the THEGAMECRAFTER_* environment variables.

    On success stores the session in the module-level ``session`` and returns
    the user record for the logged-in account (a parsed JSON dict). Raises
    when login fails, after hinting at which variable is missing.
    """
    global session
    params = {
        'api_key_id': os.environ.get('THEGAMECRAFTER_PUBLIC_KEY'),
        'username' : os.environ.get('THEGAMECRAFTER_USER'),
        'password': os.environ.get('THEGAMECRAFTER_PASSWORD'),
    }
    response = requests.post(base_url + "/session", params=params)
    print 'LOGIN RESPONSE', response.json()
    if response.status_code == 200:
        session = response.json()['result']
    else:
        # Point the user at whichever environment variable is unset.
        if not os.environ.get('THEGAMECRAFTER_PUBLIC_KEY'):
            print 'You need to set the env variable THEGAMECRAFTER_PUBLIC_KEY'
        if not os.environ.get('THEGAMECRAFTER_USER'):
            print 'You need to set the env variable THEGAMECRAFTER_USER'
        if not os.environ.get('THEGAMECRAFTER_PASSWORD'):
            print 'You need to set the env variable THEGAMECRAFTER_PASSWORD'
        raise Exception('Could not log in. Check your environment variables')
    return get('/user/' + session['user_id'])
def new_game(user, name):
    """Create a new game owned by the user's designer account.

    ``user`` is the JSON payload returned by login(), i.e. a plain dict
    (requests ``.json()``), so item access is required — the previous
    ``user.designer_id`` attribute access would raise AttributeError.
    """
    return post('game',
        name=name,
        designer_id=user['designer_id'],
        description='Automatically created (%s)' % name,
    )
def new_folder(user, asset_name, parent_id=None):
    """Create a folder for the user; defaults to the user's root folder.

    ``user`` is the JSON payload returned by login(), i.e. a plain dict, so
    it is accessed consistently with item access (the previous ``user.id``
    conflicted with the dict-style ``user['root_folder_id']`` on the line
    above and would raise AttributeError on a dict).
    """
    if parent_id is None:
        parent_id = user['root_folder_id']
    return post('folder',
        name=asset_name,
        user_id=user['id'],
        parent_id=parent_id,
    )
def new_file(filepath, folder_id):
    """Upload a local file into the given folder.

    Opens the file in binary mode so the upload is byte-exact on every
    platform, and closes the handle when done (the previous ``file(filepath)``
    was Python-2-only, used text mode and leaked the handle).
    """
    if not os.path.isfile(filepath):
        raise Exception('Not a file: %s' % filepath)
    filename = os.path.basename(filepath)
    with open(filepath, 'rb') as fp:
        return post('file', files={'file': fp}, name=filename, folder_id=folder_id)
| []
| []
| [
"THEGAMECRAFTER_PASSWORD",
"THEGAMECRAFTER_USER",
"THEGAMECRAFTER_PUBLIC_KEY"
]
| [] | ["THEGAMECRAFTER_PASSWORD", "THEGAMECRAFTER_USER", "THEGAMECRAFTER_PUBLIC_KEY"] | python | 3 | 0 | |
azure-batch/src/test/java/com/microsoft/azure/batch/BatchTestBase.java | /**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.batch;
import okhttp3.logging.HttpLoggingInterceptor;
import com.microsoft.azure.batch.BatchClient;
import com.microsoft.azure.batch.auth.BatchSharedKeyCredentials;
import com.microsoft.azure.batch.protocol.models.*;
import java.util.*;
import org.junit.Assert;
/**
 * The base for batch dataplane tests.
 */
public abstract class BatchTestBase {
    // Shared client used by all tests; created once via createClient().
    protected static BatchClient batchClient;

    /**
     * Creates the shared batch client from the AZURE_BATCH_ENDPOINT,
     * AZURE_BATCH_ACCOUNT and AZURE_BATCH_ACCESS_KEY environment variables.
     */
    protected static void createClient() {
        BatchSharedKeyCredentials credentials = new BatchSharedKeyCredentials(
                System.getenv("AZURE_BATCH_ENDPOINT"),
                System.getenv("AZURE_BATCH_ACCOUNT"),
                System.getenv("AZURE_BATCH_ACCESS_KEY"));
        batchClient = BatchClient.open(credentials);
    }

    /**
     * Creates a PaaS pool with 3 small Windows VMs if it does not exist yet,
     * then waits (polling every 30 seconds) until the pool reaches a steady
     * allocation state.
     *
     * @param poolId the identifier of the pool to create or reuse
     * @return the steady pool
     * @throws Exception if a service call fails or the wait is interrupted
     */
    protected static CloudPool createIfNotExistPaaSPool(String poolId) throws Exception {
        // Create a pool with 3 Small VMs
        String POOL_VM_SIZE = "Small";
        int POOL_VM_COUNT = 3;
        String POOL_OS_FAMILY = "4";
        String POOL_OS_VERSION = "*";

        // Steady-state timeout: 5 minutes, in milliseconds, because it is
        // compared against an elapsed time derived from Date().getTime().
        // (The previous value, 5 * 60 * 60 = 18000, was only 18 seconds
        // when interpreted as milliseconds — not the intended 5 minutes.)
        long POOL_STEADY_TIMEOUT = 5 * 60 * 1000;

        // Check if pool exists
        if (!batchClient.poolOperations().existsPool(poolId)) {
            // Use PaaS VM with Windows
            CloudServiceConfiguration configuration = new CloudServiceConfiguration();
            configuration.withOsFamily(POOL_OS_FAMILY).withTargetOSVersion(POOL_OS_VERSION);
            batchClient.poolOperations().createPool(poolId, POOL_VM_SIZE, configuration, POOL_VM_COUNT);
        }

        long startTime = System.currentTimeMillis();
        long elapsedTime = 0L;
        boolean steady = false;
        CloudPool pool;

        // Wait for the VMs to be allocated.
        while (elapsedTime < POOL_STEADY_TIMEOUT) {
            pool = batchClient.poolOperations().getPool(poolId);
            if (pool.allocationState() == AllocationState.STEADY) {
                steady = true;
                break;
            }
            System.out.println("wait 30 seconds for pool steady...");
            Thread.sleep(30 * 1000);
            elapsedTime = (new Date()).getTime() - startTime;
        }

        Assert.assertTrue("The pool did not reach a steady state in the allotted time", steady);
        return batchClient.poolOperations().getPool(poolId);
    }

    /**
     * Prefixes the given name with the current OS user name so test
     * resources are distinguishable per developer.
     */
    protected static String getStringWithUserNamePrefix(String name) {
        String userName = System.getProperty("user.name");
        return userName + name;
    }
}
| [
"\"AZURE_BATCH_ENDPOINT\"",
"\"AZURE_BATCH_ACCOUNT\"",
"\"AZURE_BATCH_ACCESS_KEY\""
]
| []
| [
"AZURE_BATCH_ACCESS_KEY",
"AZURE_BATCH_ENDPOINT",
"AZURE_BATCH_ACCOUNT"
]
| [] | ["AZURE_BATCH_ACCESS_KEY", "AZURE_BATCH_ENDPOINT", "AZURE_BATCH_ACCOUNT"] | java | 3 | 0 | |
server/server.go | package server
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"sync"
"text/template"
"time"
"../prometheus"
"github.com/gorilla/mux"
"github.com/gorilla/schema"
)
// decoder maps URL query/form values onto the prometheus structures.
var decoder = schema.NewDecoder()

// mu serializes access to the shared scrape/alert maps and to the generated
// Prometheus configuration file.
var mu = &sync.Mutex{}

// logPrintf is indirected so tests can capture log output.
var logPrintf = log.Printf

// listenerTimeout bounds requests made to Docker Flow Swarm Listener.
var listenerTimeout = 30 * time.Second
// serve holds the in-memory state of the monitor: the currently registered
// scrape targets and alerts, keyed by service/alert name.
type serve struct {
	scrapes map[string]prometheus.Scrape
	alerts  map[string]prometheus.Alert
}

// response is the JSON payload returned by the reconfigure/remove handlers.
type response struct {
	Status  int
	Message string
	Alerts  []prometheus.Alert
	prometheus.Scrape
}
// httpListenAndServe is indirected so tests can stub the HTTP server.
var httpListenAndServe = http.ListenAndServe

// Names of the service variables consulted when building scrape configs.
const scrapePort = "SCRAPE_PORT"
const serviceName = "SERVICE_NAME"
// New returns instance of the `serve` structure
var New = func() *serve {
	s := &serve{}
	s.alerts = make(map[string]prometheus.Alert)
	s.scrapes = make(map[string]prometheus.Scrape)
	return s
}
// Execute generates the initial Prometheus configuration, starts Prometheus
// in the background and serves the reconfigure/remove/ping API on
// 0.0.0.0:8080. It blocks until the HTTP listener exits.
// NOTE(review): the error returned by s.InitialConfig() is ignored here —
// confirm whether startup should abort on a listener fetch failure.
func (s *serve) Execute() error {
	s.InitialConfig()
	prometheus.WriteConfig(s.scrapes, s.alerts)
	go prometheus.Run()
	address := "0.0.0.0:8080"
	r := mux.NewRouter().StrictSlash(true)
	r.HandleFunc("/v1/docker-flow-monitor/reconfigure", s.ReconfigureHandler)
	r.HandleFunc("/v1/docker-flow-monitor/remove", s.RemoveHandler)
	r.HandleFunc("/v1/docker-flow-monitor/ping", s.PingHandler)
	// TODO: Do we need catch all?
	r.HandleFunc("/v1/docker-flow-monitor/", s.EmptyHandler)
	logPrintf("Starting Docker Flow Monitor")
	if err := httpListenAndServe(address, r); err != nil {
		logPrintf(err.Error())
		return err
	}
	return nil
}
// PingHandler answers liveness probes with an empty 200 response.
func (s *serve) PingHandler(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}
// EmptyHandler is the catch-all for the API root; it replies with an empty
// 200 response.
func (s *serve) EmptyHandler(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}
// ReconfigureHandler registers (or replaces) the scrape and alerts described
// by the request's query parameters, rewrites the Prometheus configuration
// and triggers a reload. The outcome is returned as JSON.
func (s *serve) ReconfigureHandler(w http.ResponseWriter, req *http.Request) {
	// Guard the shared scrape/alert maps and the config file on disk.
	mu.Lock()
	defer mu.Unlock()
	logPrintf("Processing " + req.URL.String())
	req.ParseForm()
	scrape := s.getScrape(req)
	// Drop the service's previous alerts before re-adding the current set.
	s.deleteAlerts(scrape.ServiceName)
	alerts := s.getAlerts(req)
	prometheus.WriteConfig(s.scrapes, s.alerts)
	err := prometheus.Reload()
	statusCode := http.StatusOK
	// A reload error is surfaced through the response body/status.
	resp := s.getResponse(&alerts, &scrape, err, statusCode)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(resp.Status)
	js, _ := json.Marshal(resp)
	w.Write(js)
}
// RemoveHandler deletes the scrape and all alerts belonging to the service
// named by the `serviceName` query parameter, rewrites the Prometheus
// configuration and triggers a reload. The outcome is returned as JSON.
func (s *serve) RemoveHandler(w http.ResponseWriter, req *http.Request) {
	// Take the same lock as ReconfigureHandler: both handlers mutate the
	// shared scrape/alert maps and rewrite the config file, and previously
	// this handler did so without any synchronization (data race).
	mu.Lock()
	defer mu.Unlock()
	logPrintf("Processing " + req.URL.Path)
	req.ParseForm()
	serviceName := req.URL.Query().Get("serviceName")
	scrape := s.scrapes[serviceName]
	delete(s.scrapes, serviceName)
	alerts := s.deleteAlerts(serviceName)
	prometheus.WriteConfig(s.scrapes, s.alerts)
	err := prometheus.Reload()
	statusCode := http.StatusOK
	resp := s.getResponse(&alerts, &scrape, err, statusCode)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(resp.Status)
	js, _ := json.Marshal(resp)
	w.Write(js)
}
// InitialConfig seeds the scrape/alert maps. When LISTENER_ADDRESS is set it
// fetches the currently running services from Docker Flow Swarm Listener and
// registers their scrapes and alerts; afterwards, scrapes defined through
// environment variables are merged in. Returns an error when the listener
// cannot be reached or the env-based scrape definition is invalid.
func (s *serve) InitialConfig() error {
	if len(os.Getenv("LISTENER_ADDRESS")) > 0 {
		logPrintf("Requesting services from Docker Flow Swarm Listener")
		addr := os.Getenv("LISTENER_ADDRESS")
		if !strings.HasPrefix(addr, "http") {
			addr = fmt.Sprintf("http://%s:8080", addr)
		}
		addr = fmt.Sprintf("%s/v1/docker-flow-swarm-listener/get-services", addr)
		timeout := time.Duration(listenerTimeout)
		client := http.Client{Timeout: timeout}
		resp, err := client.Get(addr)
		if err != nil {
			return err
		}
		// The response body was previously never closed, leaking the
		// underlying connection.
		defer resp.Body.Close()
		body, _ := ioutil.ReadAll(resp.Body)
		logPrintf("Processing: %s", string(body))
		// Best effort: an unparsable body simply yields no services.
		data := []map[string]string{}
		json.Unmarshal(body, &data)
		for _, row := range data {
			if scrape, err := s.getScrapeFromMap(row); err == nil {
				s.scrapes[scrape.ServiceName] = scrape
			}
			if alert, err := s.getAlertFromMap(row, ""); err == nil {
				s.alerts[alert.AlertNameFormatted] = alert
			}
			// Indexed alerts (alertName.1 ... alertName.10); stop at the
			// first missing index.
			for i := 1; i <= 10; i++ {
				suffix := fmt.Sprintf(".%d", i)
				if alert, err := s.getAlertFromMap(row, suffix); err == nil {
					s.alerts[alert.AlertNameFormatted] = alert
				} else {
					break
				}
			}
		}
		scrapeVariablesFromEnv := s.getScrapeVariablesFromEnv()
		if len(scrapeVariablesFromEnv) > 0 {
			scrape, err := s.parseScrapeFromEnvMap(scrapeVariablesFromEnv)
			if err != nil {
				return err
			}
			for _, row := range scrape {
				s.scrapes[row.ServiceName] = row
			}
		}
	}
	return nil
}
// getScrapeFromMap builds a scrape target from the service labels in data.
// It returns an error when the resulting scrape is incomplete.
func (s *serve) getScrapeFromMap(data map[string]string) (prometheus.Scrape, error) {
	scrape := prometheus.Scrape{
		ServiceName: data["serviceName"],
		ScrapeType:  data["scrapeType"],
	}
	// A missing or malformed port leaves ScrapePort at its zero value.
	if port, err := strconv.Atoi(data["scrapePort"]); err == nil {
		scrape.ScrapePort = port
	}
	if !s.isValidScrape(&scrape) {
		return prometheus.Scrape{}, fmt.Errorf("Not a valid scrape")
	}
	return scrape, nil
}
// getAlertFromMap builds an alert from the service labels in data, using
// suffix (e.g. ".1") to address one of possibly several alerts per service.
// It returns an error when no alert name is present for the suffix or when
// the resulting alert is invalid.
func (s *serve) getAlertFromMap(data map[string]string, suffix string) (prometheus.Alert, error) {
	if _, ok := data["alertName"+suffix]; ok {
		alert := prometheus.Alert{
			AlertAnnotations: s.getMapFromString(data["alertAnnotations"+suffix]),
			AlertFor:         data["alertFor"+suffix],
			AlertIf:          data["alertIf"+suffix],
			AlertLabels:      s.getMapFromString(data["alertLabels"+suffix]),
			AlertName:        data["alertName"+suffix],
			ServiceName:      data["serviceName"],
		}
		if len(data["replicas"]) > 0 {
			alert.Replicas, _ = strconv.Atoi(data["replicas"])
		}
		s.formatAlert(&alert)
		if s.isValidAlert(&alert) {
			return alert, nil
		}
	}
	return prometheus.Alert{}, fmt.Errorf("Not a valid alert")
}
// getMapFromString parses a comma-separated list of key=value pairs into a
// map. Entries without '=' are skipped (previously values[1] panicked with
// an index-out-of-range on such input), and values may themselves contain
// '=' because only the first one is treated as the separator.
func (s *serve) getMapFromString(value string) map[string]string {
	mappedValue := map[string]string{}
	if len(value) > 0 {
		for _, label := range strings.Split(value, ",") {
			values := strings.SplitN(label, "=", 2)
			if len(values) != 2 {
				// Malformed entry; ignore instead of panicking.
				continue
			}
			mappedValue[values[0]] = values[1]
		}
	}
	return mappedValue
}
// getAlerts extracts all alerts from the request parameters: the primary
// alert (alertName, alertIf, ...) plus up to ten indexed alerts
// (alertName.1 ... alertName.10, stopping at the first invalid one). Valid
// alerts are stored in s.alerts and returned.
func (s *serve) getAlerts(req *http.Request) []prometheus.Alert {
	alerts := []prometheus.Alert{}
	alertDecode := prometheus.Alert{}
	decoder.Decode(&alertDecode, req.Form)
	if s.isValidAlert(&alertDecode) {
		alertDecode.AlertAnnotations = s.getMapFromString(req.URL.Query().Get("alertAnnotations"))
		alertDecode.AlertLabels = s.getMapFromString(req.URL.Query().Get("alertLabels"))
		s.formatAlert(&alertDecode)
		s.alerts[alertDecode.AlertNameFormatted] = alertDecode
		alerts = append(alerts, alertDecode)
		// Previously a third argument was passed without a matching format
		// verb, producing "%!(EXTRA ...)" noise in the log output.
		logPrintf("Adding alert %s for the service %s\n", alertDecode.AlertName, alertDecode.ServiceName)
	}
	replicas := 0
	if len(req.URL.Query().Get("replicas")) > 0 {
		replicas, _ = strconv.Atoi(req.URL.Query().Get("replicas"))
	}
	for i := 1; i <= 10; i++ {
		alertName := req.URL.Query().Get(fmt.Sprintf("alertName.%d", i))
		annotations := s.getMapFromString(req.URL.Query().Get(fmt.Sprintf("alertAnnotations.%d", i)))
		labels := s.getMapFromString(req.URL.Query().Get(fmt.Sprintf("alertLabels.%d", i)))
		alert := prometheus.Alert{
			ServiceName:      alertDecode.ServiceName,
			AlertName:        alertName,
			AlertIf:          req.URL.Query().Get(fmt.Sprintf("alertIf.%d", i)),
			AlertFor:         req.URL.Query().Get(fmt.Sprintf("alertFor.%d", i)),
			AlertAnnotations: annotations,
			AlertLabels:      labels,
			Replicas:         replicas,
		}
		s.formatAlert(&alert)
		if !s.isValidAlert(&alert) {
			break
		}
		s.alerts[alert.AlertNameFormatted] = alert
		// Same format-string fix as above: no stray extra argument.
		logPrintf("Adding alert %s for the service %s\n", alert.AlertName, alert.ServiceName)
		alerts = append(alerts, alert)
	}
	return alerts
}
// alertIfShortcut describes the expansion of an @-prefixed alertIf shortcut:
// the templated PromQL expression plus the annotations and labels attached
// to alerts generated from it.
type alertIfShortcut struct {
	expanded    string            // PromQL template, rendered with alertTemplateInput.
	annotations map[string]string // annotation templates added to the alert.
	labels      map[string]string // label templates added to the alert.
}

// alertTemplateInput is the data passed when rendering a shortcut template:
// the alert being built and the shortcut's parameter values.
type alertTemplateInput struct {
	Alert  *prometheus.Alert
	Values []string
}
// alertIfShortcutData maps "@..." shortcuts (usable in an alert's AlertIf
// field) to their PromQL expansion and the default annotations/labels that
// formatAlert merges into the alert. The element type is omitted from each
// entry per `gofmt -s` (redundant composite-literal types removed).
var alertIfShortcutData = map[string]alertIfShortcut{
	"@service_mem_limit": {
		expanded:    `container_memory_usage_bytes{container_label_com_docker_swarm_service_name="{{ .Alert.ServiceName }}"}/container_spec_memory_limit_bytes{container_label_com_docker_swarm_service_name="{{ .Alert.ServiceName }}"} > {{ index .Values 0 }}`,
		annotations: map[string]string{"summary": "Memory of the service {{ .Alert.ServiceName }} is over {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}"},
	},
	"@node_mem_limit": {
		expanded:    `(sum by (instance) (node_memory_MemTotal) - sum by (instance) (node_memory_MemFree + node_memory_Buffers + node_memory_Cached)) / sum by (instance) (node_memory_MemTotal) > {{ index .Values 0 }}`,
		annotations: map[string]string{"summary": "Memory of a node is over {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}"},
	},
	"@node_mem_limit_total_above": {
		expanded:    `(sum(node_memory_MemTotal{job="{{ .Alert.ServiceName }}"}) - sum(node_memory_MemFree{job="{{ .Alert.ServiceName }}"} + node_memory_Buffers{job="{{ .Alert.ServiceName }}"} + node_memory_Cached{job="{{ .Alert.ServiceName }}"})) / sum(node_memory_MemTotal{job="{{ .Alert.ServiceName }}"}) > {{ index .Values 0 }}`,
		annotations: map[string]string{"summary": "Total memory of the nodes is over {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "up", "type": "node"},
	},
	"@node_mem_limit_total_below": {
		expanded:    `(sum(node_memory_MemTotal{job="{{ .Alert.ServiceName }}"}) - sum(node_memory_MemFree{job="{{ .Alert.ServiceName }}"} + node_memory_Buffers{job="{{ .Alert.ServiceName }}"} + node_memory_Cached{job="{{ .Alert.ServiceName }}"})) / sum(node_memory_MemTotal{job="{{ .Alert.ServiceName }}"}) < {{ index .Values 0 }}`,
		annotations: map[string]string{"summary": "Total memory of the nodes is below {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "down", "type": "node"},
	},
	"@node_fs_limit": {
		expanded:    `(node_filesystem_size{fstype="aufs"} - node_filesystem_free{fstype="aufs"}) / node_filesystem_size{fstype="aufs"} > {{ index .Values 0 }}`,
		annotations: map[string]string{"summary": "Disk usage of a node is over {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}"},
	},
	// "Above"/"below" refer to the response time; the comparison operator is
	// inverted because the expression measures the fraction of *fast*
	// requests (those under the `le` bucket boundary).
	"@resp_time_above": {
		expanded:    `sum(rate(http_server_resp_time_bucket{job="{{ .Alert.ServiceName }}", le="{{ index .Values 0 }}"}[{{ index .Values 1 }}])) / sum(rate(http_server_resp_time_count{job="{{ .Alert.ServiceName }}"}[{{ index .Values 1 }}])) < {{ index .Values 2 }}`,
		annotations: map[string]string{"summary": "Response time of the service {{ .Alert.ServiceName }} is above {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "up", "type": "service"},
	},
	"@resp_time_below": {
		expanded:    `sum(rate(http_server_resp_time_bucket{job="{{ .Alert.ServiceName }}", le="{{ index .Values 0 }}"}[{{ index .Values 1 }}])) / sum(rate(http_server_resp_time_count{job="{{ .Alert.ServiceName }}"}[{{ index .Values 1 }}])) > {{ index .Values 2 }}`,
		annotations: map[string]string{"summary": "Response time of the service {{ .Alert.ServiceName }} is below {{ index .Values 0 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "down", "type": "service"},
	},
	"@replicas_running": {
		expanded:    `count(container_memory_usage_bytes{container_label_com_docker_swarm_service_name="{{ .Alert.ServiceName }}"}) != {{ .Alert.Replicas }}`,
		annotations: map[string]string{"summary": "The number of running replicas of the service {{ .Alert.ServiceName }} is not {{ .Alert.Replicas }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "up", "type": "node"},
	},
	"@replicas_less_than": {
		expanded:    `count(container_memory_usage_bytes{container_label_com_docker_swarm_service_name="{{ .Alert.ServiceName }}"}) < {{ .Alert.Replicas }}`,
		annotations: map[string]string{"summary": "The number of running replicas of the service {{ .Alert.ServiceName }} is less than {{ .Alert.Replicas }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "up", "type": "node"},
	},
	"@replicas_more_than": {
		expanded:    `count(container_memory_usage_bytes{container_label_com_docker_swarm_service_name="{{ .Alert.ServiceName }}"}) > {{ .Alert.Replicas }}`,
		annotations: map[string]string{"summary": "The number of running replicas of the service {{ .Alert.ServiceName }} is more than {{ .Alert.Replicas }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "scale": "up", "type": "node"},
	},
	"@resp_time_server_error": {
		expanded:    `sum(rate(http_server_resp_time_count{job="{{ .Alert.ServiceName }}", code=~"^5..$$"}[{{ index .Values 0 }}])) / sum(rate(http_server_resp_time_count{job="{{ .Alert.ServiceName }}"}[{{ index .Values 0 }}])) > {{ index .Values 1 }}`,
		annotations: map[string]string{"summary": "Error rate of the service {{ .Alert.ServiceName }} is above {{ index .Values 1 }}"},
		labels:      map[string]string{"receiver": "system", "service": "{{ .Alert.ServiceName }}", "type": "errors"},
	},
}
// formatAlert computes AlertNameFormatted and, when AlertIf starts with
// "@", expands the shortcut into a full PromQL expression together with
// its default annotations and labels.
func (s *serve) formatAlert(alert *prometheus.Alert) {
	alert.AlertNameFormatted = s.getNameFormatted(fmt.Sprintf("%s_%s", alert.ServiceName, alert.AlertName))
	if strings.HasPrefix(alert.AlertIf, "@") {
		value := ""
		// A shortcut has the form "@name:v1,v2,..."; everything after the
		// first colon is the comma-separated value list for the template.
		alertSplit := strings.Split(alert.AlertIf, ":")
		shortcut := alertSplit[0]
		if len(alertSplit) > 1 {
			value = alertSplit[1]
		}
		data, ok := alertIfShortcutData[shortcut]
		if !ok {
			// Unknown shortcut: leave AlertIf untouched.
			return
		}
		alert.AlertIf = replaceTags(data.expanded, alert, value)
		if alert.AlertAnnotations == nil {
			alert.AlertAnnotations = map[string]string{}
		}
		// Merge defaults without overwriting: user-supplied annotations
		// and labels take precedence over the shortcut's defaults.
		for k, v := range data.annotations {
			if _, ok := alert.AlertAnnotations[k]; !ok {
				alert.AlertAnnotations[k] = replaceTags(v, alert, value)
			}
		}
		if alert.AlertLabels == nil {
			alert.AlertLabels = map[string]string{}
		}
		for k, v := range data.labels {
			if _, ok := alert.AlertLabels[k]; !ok {
				alert.AlertLabels[k] = replaceTags(v, alert, value)
			}
		}
	}
}
// replaceTags renders tag as a Go text template against the given alert
// and the comma-separated value list, returning the expanded string.
// Templates come from the trusted alertIfShortcutData table, so a parse
// failure is treated as a programming error (template.Must panics).
func replaceTags(tag string, alert *prometheus.Alert, value string) string {
	input := alertTemplateInput{
		Alert:  alert,
		Values: strings.Split(value, ","),
	}
	tmpl := template.Must(template.New("tag").Parse(tag))
	var rendered bytes.Buffer
	tmpl.Execute(&rendered, input)
	return rendered.String()
}
// isValidAlert reports whether the alert carries the two mandatory
// fields: a name and an "if" expression.
func (s *serve) isValidAlert(alert *prometheus.Alert) bool {
	return alert.AlertName != "" && alert.AlertIf != ""
}
// deleteAlerts removes every registered alert belonging to the given
// service (matched by formatted-name prefix) and returns the removed
// alerts. Deleting from the map while ranging over it is safe in Go.
func (s *serve) deleteAlerts(serviceName string) []prometheus.Alert {
	removed := []prometheus.Alert{}
	prefix := s.getNameFormatted(serviceName)
	for name, alert := range s.alerts {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		removed = append(removed, alert)
		delete(s.alerts, name)
	}
	return removed
}
// getNameFormatted normalizes a name by stripping every dash, yielding
// the key format used by the s.alerts map.
func (s *serve) getNameFormatted(name string) string {
	return strings.Join(strings.Split(name, "-"), "")
}
// getScrape decodes a scrape target from the request form and, when it is
// valid (non-empty service name and a positive port), registers it in
// s.scrapes keyed by service name. The decoded scrape is returned either
// way so the caller can inspect it.
func (s *serve) getScrape(req *http.Request) prometheus.Scrape {
	scrape := prometheus.Scrape{}
	decoder.Decode(&scrape, req.Form)
	if s.isValidScrape(&scrape) {
		s.scrapes[scrape.ServiceName] = scrape
		logPrintf("Adding scrape %s\n%v", scrape.ServiceName, scrape)
	}
	return scrape
}
// isValidScrape reports whether the scrape target has the mandatory
// fields: a service name and a positive port.
func (s *serve) isValidScrape(scrape *prometheus.Scrape) bool {
	if scrape.ServiceName == "" {
		return false
	}
	return scrape.ScrapePort > 0
}
// getResponse assembles the HTTP response payload for alert/scrape
// endpoints. On error the message is copied into the payload and the
// status is forced to 500 regardless of the statusCode argument.
func (s *serve) getResponse(alerts *[]prometheus.Alert, scrape *prometheus.Scrape, err error, statusCode int) response {
	out := response{
		Status: statusCode,
		Alerts: *alerts,
		Scrape: *scrape,
	}
	if err == nil {
		return out
	}
	out.Message = err.Error()
	out.Status = http.StatusInternalServerError
	return out
}
// getScrapeVariablesFromEnv scans the process environment for variables
// matching the scrape-port and service-name prefixes (the package-level
// scrapePort/serviceName constants) and returns them as a key->value map.
// The per-variable filtering/parsing is delegated to getScrapeFromEnv.
func (s *serve) getScrapeVariablesFromEnv() map[string]string {
	scrapeVariablesPrefix := []string{
		scrapePort,
		serviceName,
	}
	scrapesVariables := map[string]string{}
	for _, e := range os.Environ() {
		// getScrapeFromEnv returns an empty key for variables that do not
		// match one of the prefixes.
		if key, value := getScrapeFromEnv(e, scrapeVariablesPrefix); len(key) > 0 {
			scrapesVariables[key] = value
		}
	}
	return scrapesVariables
}
// parseScrapeFromEnvMap converts SCRAPE_PORT_<i>/SERVICE_NAME_<i> pairs
// (1-based, contiguous indexes) into Prometheus scrape targets. It
// returns an error when the variables do not come in complete pairs or
// when a port is not a valid integer; indexes where either half of the
// pair is empty are silently skipped.
func (s *serve) parseScrapeFromEnvMap(data map[string]string) ([]prometheus.Scrape, error) {
	count := len(data) / 2
	// An odd number of variables was found in the environment, which means
	// one half of a SCRAPE_PORT/SERVICE_NAME pair is missing.
	if len(data)%2 != 0 {
		msg := fmt.Errorf("SCRAPE_PORT_* and SERVICE_NAME_* environment variable configuration are not valid.")
		return []prometheus.Scrape{}, msg
	}
	scrapeFromEnv := []prometheus.Scrape{}
	for i := 1; i <= count; i++ {
		index := strconv.Itoa(i)
		if len(data[serviceName+"_"+index]) > 0 && len(data[scrapePort+"_"+index]) > 0 {
			// Named `port` (not `scrapePort`) so the package-level key
			// constant is not shadowed by the parsed integer.
			port, err := strconv.Atoi(data[scrapePort+"_"+index])
			if err != nil {
				return []prometheus.Scrape{}, err
			}
			scrapeFromEnv = append(scrapeFromEnv, prometheus.Scrape{
				ScrapePort:  port,
				ServiceName: data[serviceName+"_"+index],
				ScrapeType:  "static_configs",
			})
		}
	}
	return scrapeFromEnv, nil
}
| [
"\"LISTENER_ADDRESS\"",
"\"LISTENER_ADDRESS\""
]
| []
| [
"LISTENER_ADDRESS"
]
| [] | ["LISTENER_ADDRESS"] | go | 1 | 0 | |
main.go | package main
import (
"bufio"
"bytes"
"context"
"database/sql"
"encoding/json"
"flag"
"fmt"
"github.com/hashicorp/go-version"
"github.com/joho/godotenv"
"github.com/zhenorzz/goploy/core"
"github.com/zhenorzz/goploy/model"
"github.com/zhenorzz/goploy/route"
"github.com/zhenorzz/goploy/task"
"github.com/zhenorzz/goploy/utils"
"github.com/zhenorzz/goploy/ws"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"path"
"strconv"
"syscall"
"time"
_ "github.com/go-sql-driver/mysql"
)
// Command-line flags, bound in init().
var (
	help bool   // -help: print usage and exit
	v    bool   // -version: print the application version and exit
	s    string // -s: client signal for a running server, e.g. "stop"
)

// appVersion is the current goploy release; printed by -version and used
// by the database migration and update check.
const appVersion = "1.3.4"
// init registers the command-line flags and installs a custom usage
// printer before flag.Parse runs in main.
func init() {
	flag.StringVar(&core.AssetDir, "asset-dir", "", "default: ./")
	flag.StringVar(&s, "s", "", "stop")
	flag.BoolVar(&help, "help", false, "list available subcommands and some concept guides")
	flag.BoolVar(&v, "version", false, "show goploy version")
	// Override the default flag.Usage output.
	flag.Usage = usage
}
// usage prints the option summary to stderr; installed as flag.Usage.
func usage() {
	fmt.Fprintln(os.Stderr, "Options:")
	flag.PrintDefaults()
}
// main wires up configuration, routing, websockets and scheduled tasks,
// then serves HTTP until interrupted, shutting down gracefully on
// SIGINT/SIGTERM.
func main() {
	flag.Parse()
	if help {
		flag.Usage()
		return
	}
	if v {
		println(appVersion)
		return
	}
	// Handle client-mode invocations such as `goploy -s stop` before
	// starting a server of our own.
	handleClientSignal()
	println(`
  ______            __
 / ____/___  ____  / /___  __  __
/ / __/ __ \/ __ \/ / __ \/ / / /
/ /_/ / /_/ / /_/ / / /_/ / /_/ /
\____/\____/ .___/_/\____/\__, /
          /_/            /____/ ` + appVersion + "\n")
	install()
	_ = godotenv.Load(core.GetEnvFile())
	model.Init()
	if err := model.Update(appVersion); err != nil {
		println(err.Error())
	}
	// Record our pid so a later `goploy -s stop` can signal this process.
	pid := strconv.Itoa(os.Getpid())
	_ = ioutil.WriteFile(path.Join(core.GetAssetDir(), "goploy.pid"), []byte(pid), 0755)
	println("Start at " + time.Now().String())
	println("goploy -h for more help")
	println("Current pid: " + pid)
	println("Config Loaded: " + core.GetEnvFile())
	println("Log: " + os.Getenv("LOG_PATH"))
	println("Listen: " + os.Getenv("PORT"))
	println("Running...")
	core.CreateValidator()
	ws.Init()
	route.Init()
	task.Init()
	// server
	srv := http.Server{
		Addr: ":" + os.Getenv("PORT"),
	}
	go checkUpdate()
	core.Gwg.Add(1)
	go func() {
		defer core.Gwg.Done()
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt, syscall.SIGTERM)
		println("Received the signal: " + (<-c).String())
		println("Server is trying to shutdown, wait for a minute")
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		if err := srv.Shutdown(ctx); err != nil {
			// Bug fix: the builtin println does not interpret format
			// verbs, so the old `println("... err: %v\n", err)` printed
			// the literal verb; concatenate the error text instead.
			println("Server shutdown failed, err: " + err.Error())
		}
		println("Server shutdown gracefully")
		println("Task is trying to shutdown, wait for a minute")
		if err := task.Shutdown(ctx); err != nil {
			// Same println-format fix as above.
			println("Task shutdown failed, err: " + err.Error())
		}
		println("Task shutdown gracefully")
	}()
	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatal("ListenAndServe: ", err.Error())
	}
	_ = os.Remove(path.Join(core.GetAssetDir(), "goploy.pid"))
	println("Goroutine is trying to shutdown, wait for a minute")
	core.Gwg.Wait()
	println("Goroutine shutdown gracefully")
	println("Success")
}
func install() {
_, err := os.Stat(core.GetEnvFile())
if err == nil || os.IsExist(err) {
println("The configuration file already exists, no need to reinstall (if you need to reinstall, please back up the database `goploy` first, delete the .env file, then restart.)")
return
}
println("Installation guide ↓")
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd := exec.Command("rsync", "--version")
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
println(err.Error() + ", detail: " + stderr.String())
panic("Please check if rsync is installed correctly, see https://rsync.samba.org/download.html")
}
git := utils.GIT{}
if err := git.Run("--version"); err != nil {
println(err.Error() + ", detail: " + git.Err.String())
panic("Please check if git is installed correctly, see https://git-scm.com/downloads")
}
inputReader := bufio.NewReader(os.Stdin)
println("Installation guidelines (Enter to confirm input)")
println("Please enter the mysql user:")
mysqlUser, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
mysqlUser = utils.ClearNewline(mysqlUser)
println("Please enter the mysql password:")
mysqlPassword, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
mysqlPassword = utils.ClearNewline(mysqlPassword)
if len(mysqlPassword) != 0 {
mysqlPassword = ":" + mysqlPassword
}
println("Please enter the mysql host(default 127.0.0.1, without port):")
mysqlHost, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
mysqlHost = utils.ClearNewline(mysqlHost)
if len(mysqlHost) == 0 {
mysqlHost = "127.0.0.1"
}
println("Please enter the mysql port(default 3306):")
mysqlPort, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
mysqlPort = utils.ClearNewline(mysqlPort)
if len(mysqlPort) == 0 {
mysqlPort = "3306"
}
println("Please enter the absolute path of the log directory(default stdout):")
logPath, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
logPath = utils.ClearNewline(logPath)
if len(logPath) == 0 {
logPath = "stdout"
}
println("Please enter the listening port(default 80):")
port, err := inputReader.ReadString('\n')
if err != nil {
panic("There were errors reading, exiting program.")
}
port = utils.ClearNewline(port)
if len(port) == 0 {
port = "80"
}
println("Start to install the database...")
db, err := sql.Open("mysql", fmt.Sprintf(
"%s%s@tcp(%s:%s)/?charset=utf8mb4,utf8\n",
mysqlUser,
mysqlPassword,
mysqlHost,
mysqlPort))
if err != nil {
panic(err)
}
defer db.Close()
if err := model.ImportSQL(db, "sql/goploy.sql"); err != nil {
panic(err)
}
println("Database installation is complete")
envContent := "# when you edit its value, you need to restart\n"
envContent += "DB_TYPE=mysql\n"
envContent += fmt.Sprintf(
"DB_CONN=%s%s@tcp(%s:%s)/goploy?charset=utf8mb4,utf8\n",
mysqlUser,
mysqlPassword,
mysqlHost,
mysqlPort)
envContent += fmt.Sprintf("SIGN_KEY=%d\n", time.Now().Unix())
envContent += fmt.Sprintf("LOG_PATH=%s\n", logPath)
envContent += "ENV=production\n"
envContent += fmt.Sprintf("PORT=%s\n", port)
println("Start writing configuration file...")
file, err := os.Create(core.GetEnvFile())
if err != nil {
panic(err)
}
defer file.Close()
file.WriteString(envContent)
println("Write configuration file completed")
}
// handleClientSignal implements client-mode subcommands given via -s.
// For "stop" it reads the pid file written by a running server, sends it
// SIGTERM and exits; any other value (including empty) is a no-op.
func handleClientSignal() {
	switch s {
	case "stop":
		pidStr, err := ioutil.ReadFile(path.Join(core.GetAssetDir(), "goploy.pid"))
		if err != nil {
			log.Fatal("handle stop, ", err.Error(), ", may be the server not start")
		}
		// Bug fix: the Atoi error was previously ignored, so a corrupt
		// pid file would have produced pid 0 and signalled the wrong
		// target.
		pid, err := strconv.Atoi(string(pidStr))
		if err != nil {
			log.Fatal("handle stop, ", err.Error(), ", invalid pid file")
		}
		process, err := os.FindProcess(pid)
		if err != nil {
			log.Fatal("handle stop, ", err.Error(), ", may be the server not start")
		}
		err = process.Signal(syscall.SIGTERM)
		if err != nil {
			log.Fatal("handle stop, ", err.Error())
		}
		os.Exit(1)
	}
}
// checkUpdate queries the GitHub releases API and prints a notice when a
// release newer than appVersion is available. All failures are reported
// to stdout and swallowed; this is a best-effort background check.
func checkUpdate() {
	resp, err := http.Get("https://api.github.com/repos/zhenorzz/goploy/releases/latest")
	if err != nil {
		println("Check failed")
		println(err.Error())
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		println("Check failed")
		println(err.Error())
		return
	}
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		println("Check failed")
		println(err.Error())
		return
	}
	// Bug fix: the type assertions were previously unchecked and would
	// panic on an unexpected API response (e.g. a rate-limit error body
	// without a tag_name field).
	tagName, ok := result["tag_name"].(string)
	if !ok {
		println("Check failed")
		println("unexpected response: tag_name missing or not a string")
		return
	}
	tagVer, err := version.NewVersion(tagName)
	if err != nil {
		println("Check version error")
		println(err.Error())
		return
	}
	currentVer, _ := version.NewVersion(appVersion)
	if tagVer.GreaterThan(currentVer) {
		println("New release available")
		if htmlURL, ok := result["html_url"].(string); ok {
			println(htmlURL)
		}
	}
}
| [
"\"LOG_PATH\"",
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT",
"LOG_PATH"
]
| [] | ["PORT", "LOG_PATH"] | go | 2 | 0 | |
mqtt-gateway/src/main/java/enmasse/mqtt/Application.java | /*
* Copyright 2016-2018, EnMasse authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package enmasse.mqtt;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.logging.SLF4JLogDelegateFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.vertx.core.logging.LoggerFactory.LOGGER_DELEGATE_FACTORY_CLASS_NAME;
/**
* EnMasse MQTT gateway main application class
*/
public class Application {
    private static final Logger LOG = LoggerFactory.getLogger(Application.class);
    // Shared Vert.x instance hosting the gateway verticle.
    private final Vertx vertx = Vertx.vertx();
    // Gateway configuration (including the startup/shutdown timeout).
    private final MqttGatewayOptions options;
    // The verticle implementing the MQTT gateway, deployed on startup.
    private final MqttGateway mqttGateway;
    // Guards against double start/stop; true while the gateway is running.
    private AtomicBoolean running = new AtomicBoolean();
    /**
     * Creates the application wrapper.
     *
     * @param options gateway configuration options
     * @param mqttGateway the gateway verticle to deploy
     */
    public Application(MqttGatewayOptions options, MqttGateway mqttGateway) {
        this.options = options;
        this.mqttGateway = mqttGateway;
    }
    /**
     * Deploys the gateway verticle and blocks until the deployment
     * completes or the configured startup timeout elapses. On timeout or
     * interruption the application is shut down. Idempotent: a second
     * call while already running is a no-op.
     */
    public void registerVerticles() {
        if (this.running.compareAndSet(false, true)) {
            long startupTimeout = this.options.getStartupTimeout().getSeconds();
            try {
                CountDownLatch latch = new CountDownLatch(1);
                Future<Void> startFuture = Future.future();
                startFuture.setHandler(done -> {
                    if (done.succeeded()) {
                        latch.countDown();
                    } else {
                        // Deployment failed: latch never counts down, so the
                        // await below times out and triggers a shutdown.
                        LOG.error("Could not start MQTT gateway", done.cause());
                    }
                });
                // start deploying more verticle instances
                this.deployVerticles(startFuture);
                // wait for deploying end
                if (latch.await(startupTimeout, TimeUnit.SECONDS)) {
                    LOG.info("MQTT gateway startup completed successfully");
                } else {
                    LOG.error("Startup timed out after {} seconds, shutting down ...", startupTimeout);
                    this.shutdown();
                }
            } catch (InterruptedException e) {
                LOG.error("Startup process has been interrupted, shutting down ...");
                this.shutdown();
            }
        }
    }
    /**
     * Execute verticles deploy operation
     *
     * @param resultHandler handler called when the deploy ends
     */
    private void deployVerticles(Future<Void> resultHandler) {
        LOG.debug("Starting up MQTT gateway verticle");
        Future<Void> result = Future.future();
        this.vertx.deployVerticle(this.mqttGateway, done -> {
            if (done.succeeded()) {
                LOG.debug("Verticle instance deployed [{}]", done.result());
                result.complete();
            } else {
                LOG.debug("Failed to deploy verticle instance {}", done.cause());
                result.fail(done.cause());
            }
        });
        // Propagate the deployment outcome to the caller-supplied handler.
        result.setHandler(done -> {
            if (done.succeeded()) {
                resultHandler.complete();
            } else {
                resultHandler.fail(done.cause());
            }
        });
    }
    /**
     * Shuts the application down if it is currently running, waiting at
     * most the configured startup timeout for Vert.x to close.
     * Idempotent: a call while not running is a no-op.
     */
    public void shutdown() {
        if (this.running.compareAndSet(true, false)) {
            this.shutdown(this.options.getStartupTimeout().getSeconds(), result -> {
                // do nothing ?
            });
        }
    }
    /**
     * Execute Vert.x shutdown with related verticles
     * @param timeout max timeout to wait for shutdown
     * @param shutdownHandler handler called when the shutdown ends
     */
    private void shutdown(long timeout, Handler<Boolean> shutdownHandler) {
        try {
            CountDownLatch latch = new CountDownLatch(1);
            if (this.vertx != null) {
                this.vertx.close(done -> {
                    if (done.failed()) {
                        LOG.error("Could not shut down MQTT gateway cleanly", done.cause());
                    }
                    latch.countDown();
                });
                if (latch.await(timeout, TimeUnit.SECONDS)) {
                    LOG.info("MQTT gateway shut down completed");
                    shutdownHandler.handle(Boolean.TRUE);
                } else {
                    LOG.error("Shut down of MQTT gateway timed out, aborting...");
                    shutdownHandler.handle(Boolean.FALSE);
                }
            }
        } catch (InterruptedException e) {
            LOG.error("Shut down of MQTT gateway has been interrupted, aborting...");
            shutdownHandler.handle(Boolean.FALSE);
        }
    }
    /**
     * Entry point: routes Vert.x logging through SLF4J, builds the gateway
     * from environment variables, deploys it and installs a JVM shutdown
     * hook for a graceful stop.
     *
     * @param args command line arguments (unused)
     */
    public static void main(String[] args) {
        if (System.getProperty(LOGGER_DELEGATE_FACTORY_CLASS_NAME) == null) {
            System.setProperty(LOGGER_DELEGATE_FACTORY_CLASS_NAME, SLF4JLogDelegateFactory.class.getName());
        }
        Map<String, String> env = System.getenv();
        MqttGatewayOptions options = MqttGatewayOptions.fromEnv(env);
        LOG.info("MqttGateway starting with options: {}", options);
        MqttGateway gateway = new MqttGateway(options);
        Application app = new Application(options, gateway);
        app.registerVerticles();
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                super.run();
                LOG.info("MqttGateway shutdown");
                app.shutdown();
            }
        });
    }
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
backend/app/api.py | import os
import json
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.config import Config
from starlette.requests import Request
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import HTMLResponse, RedirectResponse
from authlib.integrations.starlette_client import OAuth, OAuthError
app = FastAPI()
origins = [
"http://localhost:3000",
"localhost:3000"
]
app.add_middleware(SessionMiddleware, secret_key=os.environ.get("GOOGLE_CLIENT_SECRET"))
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
config = Config('.env')
oauth = OAuth(config)
CONF_URL = 'https://accounts.google.com/.well-known/openid-configuration'
oauth.register(
name='google',
server_metadata_url=CONF_URL,
client_kwargs={
'scope': 'openid email profile'
}
)
@app.route('/')
async def homepage(request: Request):
    """Show the signed-in user's profile (as JSON) or a login link.

    Security fix: the profile dict originates from the OAuth provider and
    contains user-controlled fields (e.g. the display name), so it is
    HTML-escaped before being interpolated into the page to prevent XSS.
    """
    from html import escape  # stdlib; imported locally for this page only

    user = request.session.get('user')
    if user:
        data = escape(json.dumps(user))
        page = (
            f'<pre>{data}</pre>'
            '<a href="/logout">logout</a>'
        )
        return HTMLResponse(page)
    return HTMLResponse('<a href="/login">login</a>')
@app.route('/login')
async def login(request: Request):
    """Start the OAuth flow by redirecting the browser to Google.

    The callback URL is derived from the registered /auth route so it
    stays correct if the mount point changes.
    """
    redirect_uri = request.url_for('auth')
    return await oauth.google.authorize_redirect(request, redirect_uri)
@app.route('/auth')
async def auth(request: Request):
    """OAuth callback: exchange the authorization code, store the user.

    On a denied/failed authorization the provider error is rendered;
    otherwise the ID-token claims are saved in the session and the user
    is sent back to the homepage.
    """
    try:
        token = await oauth.google.authorize_access_token(request)
    except OAuthError as error:
        return HTMLResponse(f'<h1>{error.error}</h1>')
    # NOTE(review): parse_id_token was removed in authlib >= 1.0 (claims
    # are returned in token['userinfo'] instead) -- confirm the pinned
    # authlib version still provides this method.
    user = await oauth.google.parse_id_token(request, token)
    request.session['user'] = dict(user)
    return RedirectResponse(url='/')
@app.route('/logout')
async def logout(request: Request):
    """Forget the signed-in user (no-op if absent) and return home."""
    session = request.session
    session.pop('user', None)
    return RedirectResponse(url='/')
| []
| []
| [
"GOOGLE_CLIENT_SECRET"
]
| [] | ["GOOGLE_CLIENT_SECRET"] | python | 1 | 0 | |
test/functional/test_runner.py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Woochain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:WoochainTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# Test lists. Within each list the longest-running tests go first so that
# parallel runs finish sooner.
BASE_SCRIPTS= [
    # Scripts that are run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'wallet-hd.py',
    'walletbackup.py',
    # vv Tests less than 5m vv
    'p2p-fullblocktest.py',
    'fundrawtransaction.py',
    'p2p-compactblocks.py',
    'segwit.py',
    # vv Tests less than 2m vv
    'wallet.py',
    'wallet-accounts.py',
    'p2p-segwit.py',
    'wallet-dump.py',
    'listtransactions.py',
    # vv Tests less than 60s vv
    'sendheaders.py',
    'zapwallettxes.py',
    'importmulti.py',
    'mempool_limit.py',
    'merkle_blocks.py',
    'receivedby.py',
    'abandonconflict.py',
    'bip68-112-113-p2p.py',
    'rawtransactions.py',
    'address_types.py',
    'reindex.py',
    # vv Tests less than 30s vv
    'keypool-topup.py',
    'zmq_test.py',
    'Woochain_cli.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'txn_clone.py --segwit',
    'getchaintips.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_persist.py',
    'multiwallet.py',
    'multiwallet.py --usecli',
    'httpbasics.py',
    'multi_rpc.py',
    'proxy_test.py',
    'signrawtransactions.py',
    'disconnect_ban.py',
    'decodescript.py',
    'blockchain.py',
    'deprecated_rpc.py',
    'disablewallet.py',
    'net.py',
    'keypool.py',
    'p2p-mempool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py',
    'invalidtxrequest.py',
    'p2p-versionbits-warning.py',
    'preciousblock.py',
    'importprunedfunds.py',
    'signmessages.py',
    'nulldummy.py',
    'import-rescan.py',
    'mining.py',
    'bumpfee.py',
    'rpcnamedargs.py',
    'listsinceblock.py',
    'p2p-leaktests.py',
    'wallet-encryption.py',
    'bipdersig-p2p.py',
    'bip65-cltv-p2p.py',
    'uptime.py',
    'resendwallettransactions.py',
    'minchainwork.py',
    'p2p-fingerprint.py',
    'uacomment.py',
    'p2p-acceptblock.py',
    'feature_logging.py',
    'node_network_limited.py',
    'conf_args.py',
    # Don't append tests at the end to avoid merge conflicts
    # Put them in a random line within the section that fits their approximate run-time
]

EXTENDED_SCRIPTS = [
    # These tests are not run by the travis build process.
    # Longest test should go first, to favor running tests in parallel
    'pruning.py',
    # vv Tests less than 20m vv
    'smartfees.py',
    # vv Tests less than 5m vv
    'maxuploadtarget.py',
    'mempool_packages.py',
    'dbcrash.py',
    # vv Tests less than 2m vv
    'bip68-sequence.py',
    'getblocktemplate_longpoll.py',
    'p2p-timeouts.py',
    # vv Tests less than 60s vv
    'bip9-softforks.py',
    'p2p-feefilter.py',
    'rpcbind_test.py',
    # vv Tests less than 30s vv
    'assumevalid.py',
    'example_test.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'notifications.py',
    'invalidateblock.py',
    'replace-by-fee.py',
]

# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS

NON_SCRIPTS = [
    # These are python files that live in the functional tests directory, but are not test scripts.
    "combine_logs.py",
    "create_cache.py",
    "test_runner.py",
]
def main():
    """Parse arguments, build the test list and dispatch to run_tests().

    Fixes over the previous version: the regex literals are raw strings
    (``"\\.py$"`` was an invalid escape sequence, a DeprecationWarning on
    modern Python), and the config file handle is closed via a context
    manager instead of being leaked.
    """
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    with open(configfile) as f:
        config.read_file(f)

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/Woochain_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_Woochaind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/Woochain/Woochain/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/Woochain/Woochain/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_Woochaind):
        print("No functional tests to run. Wallet, utils, and Woochaind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
        test_list = []
        for t in tests:
            if t in ALL_SCRIPTS:
                test_list.append(t)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
    else:
        # No individual tests have been specified.
        # Run all base tests, and optionally run extended tests.
        test_list = BASE_SCRIPTS
        if args.extended:
            # place the EXTENDED_SCRIPTS first since the three longest ones
            # are there and the list is shorter
            test_list = EXTENDED_SCRIPTS + test_list

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
        for exclude_test in tests_excl:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
    """Run every script in test_list (up to `jobs` in parallel) and exit the
    process with status 0 if all tests passed or were skipped, 1 otherwise.

    src_dir/build_dir/exeext locate the test scripts and built binaries;
    tmpdir is the scratch area (removed at the end if left empty);
    enable_coverage wires in RPCCoverage; args are passed through to each
    test script; combined_logs_len > 0 dumps that many tail lines of the
    combined per-node logs for each failing test.

    NOTE(review): args=[] is a mutable default argument; it is only read
    here, but confirm no caller relies on mutating it.
    """
    # Warn if Woochaind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "Woochaind"]) is not None:
            print("%sWARNING!%s There is already a Woochaind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # pidof missing or no process found -- either way, nothing to warn about.
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    # Set env vars consumed by the test framework; only set defaults when the
    # caller has not already exported BITCOIND.
    if "BITCOIND" not in os.environ:
        os.environ["BITCOIND"] = build_dir + '/src/Woochaind' + exeext
        os.environ["BITCOINCLI"] = build_dir + '/src/Woochain-cli' + exeext

    tests_dir = src_dir + '/test/functional/'

    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up front so parallel tests don't race to build it.
        try:
            subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except Exception as e:
            print(e.output)
            raise e

    # Run Tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    time0 = time.time()
    test_results = []

    # Width of the name column in the summary table.
    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            # Failure: dump the captured output, and optionally the tail of the
            # combined node logs to aid debugging.
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

    print_results(test_results, max_len_name, (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
    # Exit status: 0 when everything passed (not True -> 0), 1 otherwise.
    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print a summary table of every test result plus an aggregate row.

    Sorts results case-insensitively by name, pads the name column to
    max_len_name, and appends the accumulated duration and total runtime.
    """
    pieces = ["\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]]

    test_results.sort(key=lambda result: result.name.lower())
    overall_success = True
    accumulated = 0

    for result in test_results:
        overall_success = overall_success and result.was_successful
        accumulated += result.time
        result.padding = max_len_name
        pieces.append(str(result))

    status = TICK + "Passed" if overall_success else CROSS + "Failed"
    pieces.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), accumulated) + BOLD[0])
    pieces.append("Runtime: %s s\n" % (runtime))

    print("".join(pieces))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.

    Maintains up to num_tests_parallel concurrently running test
    subprocesses; get_next() tops up the pool and blocks until one of the
    running jobs finishes, returning its result.
    """

    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        # Count of currently running subprocesses.
        self.num_running = 0
        # In case there is a graveyard of zombie Woochainds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        # Each job is a tuple: (name, start_time, Popen, testdir, stdout_file, stderr_file).
        self.jobs = []

    def get_next(self):
        """Start as many pending tests as the job limit allows, then block
        until one running job exits.

        Returns a (TestResult, testdir, stdout, stderr) tuple for the
        first finished job; raises IndexError when nothing is left to run.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            t = self.test_list.pop(0)
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            # Spill captured output to disk only beyond 64 KiB.
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            # The list entry may carry per-test arguments after the script name.
            test_argv = t.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((t,
                              time.time(),
                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for j in self.jobs:
                (name, time0, proc, testdir, log_out, log_err) = j
                if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output.
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any output on stderr demotes a passing exit code to Failed.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(j)
                    return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
            # Heartbeat while waiting for the pool to make progress.
            print('.', end='', flush=True)
class TestResult():
    """Outcome of a single test script run.

    status is one of "Passed", "Failed" or "Skipped"; padding is the
    name-column width used when rendering the summary row.
    """

    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0

    def __repr__(self):
        # Pick the colour and glyph matching the status.
        if self.status == "Passed":
            color, glyph = BLUE, TICK
        elif self.status == "Failed":
            color, glyph = RED, CROSS
        elif self.status == "Skipped":
            color, glyph = GREY, CIRCLE
        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]

    @property
    def was_successful(self):
        # Skipped tests count as successful.
        return self.status != "Failed"
def check_script_list(src_dir):
    """Check scripts directory.

    Check that there are no scripts in the functional tests directory which
    are not being run by pull-tester.py. On Travis, unlisted scripts are a
    hard error so incomplete commits cannot be merged.
    """
    script_dir = src_dir + '/test/functional/'
    python_files = {name for name in os.listdir(script_dir) if name[-3:] == ".py"}
    listed_scripts = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(python_files - listed_scripts)
    if len(missed_tests) != 0:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if os.getenv('TRAVIS') == 'true':
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `Woochain-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        # Fresh scratch directory; passed to each test via self.flag.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if uncovered:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % i) for i in sorted(uncovered)))
        else:
            print("All RPC commands covered.")

    def cleanup(self):
        """Remove the coverage scratch directory and everything in it."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # These filenames are shared with `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as f:
            all_cmds = {line.strip() for line in f.readlines()}

        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for name in files:
                if name.startswith(coverage_file_prefix):
                    with open(os.path.join(root, name), 'r') as f:
                        covered_cmds.update(line.strip() for line in f.readlines())

        return all_cmds - covered_cmds
# Entry point when invoked directly as a script.
if __name__ == '__main__':
    main()
| []
| []
| [
"BITCOINCLI",
"TRAVIS",
"BITCOIND"
]
| [] | ["BITCOINCLI", "TRAVIS", "BITCOIND"] | python | 3 | 0 | |
controllers/mongodb_controller.go | /*
# Copyright 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package controllers
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"math"
"math/big"
"os"
"strconv"
"strings"
"text/template"
"time"
"github.com/ghodss/yaml"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
resource "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
mongodbv1alpha1 "github.com/IBM/ibm-mongodb-operator/api/v1alpha1"
)
// MongoDBReconciler reconciles a MongoDB object.
//
// Client is the controller-runtime client used for all reads/writes of
// namespaced resources; Reader is a separate read client used here only to
// list cluster-scoped StorageClasses (see getstorageclass); Scheme is
// required to set owner references on created resources.
type MongoDBReconciler struct {
	Client client.Client
	Reader client.Reader
	Log    logr.Logger
	Scheme *runtime.Scheme
}
// mongodbOperatorURI is the operator identity written into the
// app.kubernetes.io/{instance,managed-by,name} labels on the StatefulSet.
const mongodbOperatorURI = `mongodbs.operator.ibm.com`

// defaultPVCSize is the storage request applied when neither an existing
// PVC nor the custom resource specifies a size.
const defaultPVCSize = `20Gi`
// MongoDB StatefulSet Data
//
// mongoDBStatefulSetData carries the values substituted into the
// `statefulset` YAML template by Reconcile.
type mongoDBStatefulSetData struct {
	Replicas       int    // replica count (may be raised to match existing PVCs)
	ImageRepo      string // image registry prefix from the CR spec
	StorageClass   string // resolved storage class name
	InitImage      string // from env IBM_MONGODB_INSTALL_IMAGE
	BootstrapImage string // from env IBM_MONGODB_IMAGE
	MetricsImage   string // from env IBM_MONGODB_EXPORTER_IMAGE
	CPULimit       string
	CPURequest     string
	MemoryLimit    string
	MemoryRequest  string
	NamespaceName  string
	StsLabels      map[string]string // labels for the StatefulSet object
	PodLabels      map[string]string // labels for the pod template
	PVCSize        string            // storage request for the data PVCs
	UserID         int               // 0 on OCP clusters, 1000 otherwise (see Reconcile)
}
// +kubebuilder:rbac:groups=mongodb.operator.ibm.com,namespace=ibm-common-services,resources=mongodbs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=mongodb.operator.ibm.com,namespace=ibm-common-services,resources=mongodbs/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,namespace=ibm-common-services,resources=services;services/finalizers;serviceaccounts;endpoints;persistentvolumeclaims;events;configmaps;secrets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,namespace=ibm-common-services,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=monitoring.coreos.com,namespace=ibm-common-services,resources=servicemonitors,verbs=get;create
// +kubebuilder:rbac:groups=apps,namespace=ibm-common-services,resourceNames=ibm-mongodb-operator,resources=deployments/finalizers,verbs=update
// +kubebuilder:rbac:groups=operator.ibm.com,namespace=ibm-common-services,resources=mongodbs;mongodbs/finalizers;mongodbs/status,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=certmanager.k8s.io,namespace=ibm-common-services,resources=certificates;certificaterequests;orders;challenges;issuers,verbs=get;list;watch;create;update;patch;delete

// Reconcile drives the cluster towards the state described by a MongoDB
// custom resource: it creates the supporting ServiceAccount, Services,
// ConfigMaps and Secrets, renders and applies the icp-mongodb StatefulSet
// from a template, creates cert-manager issuers/certificates, adopts the
// data PVCs, and requeues every minute until all replicas report ready.
// The YAML manifests it applies (mongoSA, service, icpService, the various
// *ConfigMap and *Yaml constants, and the statefulset template) are
// declared elsewhere in this package.
func (r *MongoDBReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
	_ = context.Background()
	_ = r.Log.WithValues("mongodb", request.NamespacedName)

	// Fetch the MongoDB instance
	instance := &mongodbv1alpha1.MongoDB{}
	err := r.Client.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}

	r.Log.Info("creating mongodb service account")
	if err := r.createFromYaml(instance, []byte(mongoSA)); err != nil {
		return reconcile.Result{}, err
	}

	r.Log.Info("creating mongodb service")
	if err := r.createFromYaml(instance, []byte(service)); err != nil {
		return reconcile.Result{}, err
	}

	r.Log.Info("creating mongodb icp service")
	if err := r.createFromYaml(instance, []byte(icpService)); err != nil {
		return reconcile.Result{}, err
	}

	// Common metadata labels applied to the secrets created below.
	metadatalabel := map[string]string{"app.kubernetes.io/name": "icp-mongodb", "app.kubernetes.io/component": "database",
		"app.kubernetes.io/managed-by": "operator", "app.kubernetes.io/instance": "icp-mongodb", "release": "mongodb"}

	r.Log.Info("creating icp mongodb config map")

	//Calculate MongoDB cache Size
	var cacheSize float64
	var cacheSizeGB float64
	if instance.Spec.Resources.Limits.Memory().String() != "0" {
		ramMB := instance.Spec.Resources.Limits.Memory().ScaledValue(resource.Mega)
		// Cache Size is 40 percent of RAM
		cacheSize = float64(ramMB) * 0.4
		// Convert to gig
		cacheSizeGB = cacheSize / 1000.0
		// Round to fit config
		cacheSizeGB = math.Floor(cacheSizeGB*100) / 100
	} else {
		//default value is 5Gi
		// NOTE(review): comment says 5Gi but the value set is 2.0 GB — confirm
		// which is intended.
		cacheSizeGB = 2.0
	}
	monogdbConfigmapData := struct {
		CacheSize float64
	}{
		CacheSize: cacheSizeGB,
	}
	// TO DO -- convert configmap to take option.
	var mongodbConfigYaml bytes.Buffer
	tc := template.Must(template.New("mongodbconfigmap").Parse(mongodbConfigMap))
	if err := tc.Execute(&mongodbConfigYaml, monogdbConfigmapData); err != nil {
		return reconcile.Result{}, err
	}
	r.Log.Info("creating or updating mongodb configmap")
	if err := r.createUpdateFromYaml(instance, mongodbConfigYaml.Bytes()); err != nil {
		return reconcile.Result{}, err
	}
	// NOTE(review): this applies the RAW, unexecuted template text even though
	// the rendered configmap was already applied just above — looks redundant
	// (and AlreadyExists is swallowed by createFromYaml); confirm whether this
	// call can be removed.
	if err := r.createFromYaml(instance, []byte(mongodbConfigMap)); err != nil {
		return reconcile.Result{}, err
	}

	r.Log.Info("creating or updating icp mongodb init config map")
	if err := r.createUpdateFromYaml(instance, []byte(initConfigMap)); err != nil {
		return reconcile.Result{}, err
	}

	r.Log.Info("creating icp mongodb install config map")
	if err := r.createFromYaml(instance, []byte(installConfigMap)); err != nil {
		return reconcile.Result{}, err
	}

	// Create admin user and password as random string
	// TODO: allow user to give a Secret
	var pass, user string
	user = createRandomAlphaNumeric(8)
	pass = createRandomAlphaNumeric(13)
	mongodbAdmin := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{
				"app": "icp-mongodb",
			},
			Name:      "icp-mongodb-admin",
			Namespace: instance.GetNamespace(),
		},
		Type: corev1.SecretTypeOpaque,
		StringData: map[string]string{
			"user":     user,
			"password": pass,
		},
	}
	// Set CommonServiceConfig instance as the owner and controller
	// if err := controllerutil.SetControllerReference(instance, mongodbAdmin, r.scheme); err != nil {
	// 	return reconcile.Result{}, err
	// }
	// NOTE: owner reference is intentionally commented out above, so the admin
	// secret is not garbage-collected with the CR; AlreadyExists is ignored so
	// existing credentials are never overwritten.
	r.Log.Info("creating icp mongodb admin secret")
	if err = r.Client.Create(context.TODO(), mongodbAdmin); err != nil && !errors.IsAlreadyExists(err) {
		return reconcile.Result{}, err
	}

	mongodbMetric := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Labels:    metadatalabel,
			Name:      "icp-mongodb-metrics",
			Namespace: instance.GetNamespace(),
		},
		Type: corev1.SecretTypeOpaque,
		StringData: map[string]string{
			"user":     "metrics",
			"password": "icpmetrics",
		},
	}
	// Set CommonServiceConfig instance as the owner and controller
	if err := controllerutil.SetControllerReference(instance, mongodbMetric, r.Scheme); err != nil {
		return reconcile.Result{}, err
	}
	r.Log.Info("creating icp mongodb metric secret")
	if err = r.Client.Create(context.TODO(), mongodbMetric); err != nil && !errors.IsAlreadyExists(err) {
		return reconcile.Result{}, err
	}

	keyfileSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Labels:    metadatalabel,
			Name:      "icp-mongodb-keyfile",
			Namespace: instance.GetNamespace(),
		},
		Type: corev1.SecretTypeOpaque,
		StringData: map[string]string{
			"key.txt": "icptest",
		},
	}
	// Set CommonServiceConfig instance as the owner and controller
	if err := controllerutil.SetControllerReference(instance, keyfileSecret, r.Scheme); err != nil {
		return reconcile.Result{}, err
	}
	r.Log.Info("creating icp mongodb keyfile secret")
	if err = r.Client.Create(context.TODO(), keyfileSecret); err != nil && !errors.IsAlreadyExists(err) {
		return reconcile.Result{}, err
	}

	// Resolve the storage class: the value recorded in Status wins, then
	// Spec, then cluster discovery. Switching after creation is unsupported.
	var storageclass string

	if instance.Status.StorageClass == "" {
		if instance.Spec.StorageClass == "" {
			// TODO: weird because the storage class on OCP is opened for all
			// Need to deploy an OCP cluster on AWS to verify
			storageclass, err = r.getstorageclass()
			if err != nil {
				return reconcile.Result{}, err
			}
		} else {
			storageclass = instance.Spec.StorageClass
		}
	} else {
		if instance.Spec.StorageClass != "" && instance.Spec.StorageClass != instance.Status.StorageClass {
			r.Log.Info("You need to delete the monogodb cr before switch the storage class. Please note that this will lose all your datamake")
		}
		storageclass = instance.Status.StorageClass
	}

	// Default values
	cpuRequest := "2000m"
	memoryRequest := "5Gi"
	cpuLimit := "2000m"
	memoryLimit := "5Gi"

	// Check cpu request values and default if not there
	if instance.Spec.Resources.Requests.Cpu().String() != "0" {
		cpuRequest = instance.Spec.Resources.Requests.Cpu().String()
	}

	// Check memory request values and default if not there
	if instance.Spec.Resources.Requests.Memory().String() != "0" {
		memoryRequest = instance.Spec.Resources.Requests.Memory().String()
	}

	// Check cpu limit values and default if not there
	if instance.Spec.Resources.Limits.Cpu().String() != "0" {
		cpuLimit = instance.Spec.Resources.Limits.Cpu().String()
	}

	// Check memory limit values and default if not there
	if instance.Spec.Resources.Limits.Memory().String() != "0" {
		memoryLimit = instance.Spec.Resources.Limits.Memory().String()
	}

	// Default values
	PVCSizeRequest := defaultPVCSize

	// If PVC already exist and the value does not match the PVCSizeRequest then log information that it cannot be changed.
	pvc := &corev1.PersistentVolumeClaim{}
	err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "mongodbdir-icp-mongodb-0", Namespace: instance.Namespace}, pvc)
	if err == nil {
		// PVC storage is immutable: keep the existing size and only warn when
		// the CR asks for something different.
		PVCSizeRequest = pvc.Spec.Resources.Requests.Storage().String()
		if instance.Spec.PVC.Resources.Requests.Storage().String() != "0" {
			if (PVCSizeRequest != instance.Spec.PVC.Resources.Requests.Storage().String()) && (instance.Spec.PVC.Resources.Requests.Storage().String() != defaultPVCSize) {
				r.Log.Info("mongoDB Persistent Volume Claim already exists, it's size is immutable, ignoring requested storage size for the PVC")
			}
		} else {
			if PVCSizeRequest != defaultPVCSize {
				r.Log.Info("mongoDB Persistent Volume Claim already exists, it's size is immutable.")
				r.Log.Info("the PVC storage request is not set to the current default nor is it specified in the Custom Resource")
			}
		}
	} else if errors.IsNotFound(err) {
		// Check PVC size request values and default if not there
		if instance.Spec.PVC.Resources.Requests.Storage().String() != "0" {
			PVCSizeRequest = instance.Spec.PVC.Resources.Requests.Storage().String()
		}
	}
	// NOTE(review): Get errors other than NotFound are silently ignored here
	// and the default size is used — confirm this is intentional.

	// Select User to use
	cppConfig := &corev1.ConfigMap{}
	err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "ibm-cpp-config", Namespace: instance.Namespace}, cppConfig)
	if err != nil {
		return reconcile.Result{}, err
	}
	uid := 0
	if clusterType, exists := cppConfig.Data["kubernetes_cluster_type"]; exists {
		// Non-OCP clusters run the pods as uid 1000; OCP keeps 0.
		if clusterType != "ocp" {
			uid = 1000
		}
	}

	// Check if statefulset already exists; reuse its labels when it does so
	// updates don't fight the (immutable) selector.
	sts := &appsv1.StatefulSet{}
	var stsLabels map[string]string
	var podLabels map[string]string
	err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "icp-mongodb", Namespace: instance.Namespace}, sts)
	if err == nil {
		r.Log.Info("succeeded to get statefulset check")
		stsLabels = sts.ObjectMeta.Labels
		podLabels = sts.Spec.Template.ObjectMeta.Labels
	} else if errors.IsNotFound(err) {
		r.Log.Info("statefulset not found for labels")
		constStsLabels := make(map[string]string)
		constStsLabels["app"] = "icp-mongodb"
		constStsLabels["release"] = "mongodb"
		constStsLabels["app.kubernetes.io/instance"] = mongodbOperatorURI
		constStsLabels["app.kubernetes.io/managed-by"] = mongodbOperatorURI
		constStsLabels["app.kubernetes.io/name"] = mongodbOperatorURI
		stsLabels = constStsLabels
		constPodLabels := make(map[string]string)
		constPodLabels["app.kubernetes.io/instance"] = "common-mongodb"
		constPodLabels["app"] = "icp-mongodb"
		constPodLabels["release"] = "mongodb"
		podLabels = constPodLabels
	} else {
		return reconcile.Result{}, err
	}

	//Set Replicas
	//Get current number of replicas in cluster based on number of PVCs
	pvcs := &corev1.PersistentVolumeClaimList{}
	err = r.Client.List(context.TODO(), pvcs, &client.ListOptions{
		Namespace: instance.Namespace,
	})
	pvcCount := 0
	if err == nil {
		// loop items in pvcs and count mongodbdir
		for _, pvc := range pvcs.Items {
			if strings.Contains(pvc.ObjectMeta.Name, "mongodbdir-icp-mongodb") {
				pvcCount++
				r.Log.Info("Found PVC" + pvc.ObjectMeta.Name)
			}
		}
	} else {
		return reconcile.Result{}, err
	}

	//check pvc count with replicas
	//if pvcCount is greater than the replicas, then at one time there must have been more replicas
	replicas := instance.Spec.Replicas
	if pvcCount > replicas {
		replicas = pvcCount
		r.Log.Info("Ignoring Replica spec, there are more mongodbdir-icp-mongodb PVCs than the current relica request.")
		r.Log.Info("PVC count: " + strconv.Itoa(pvcCount))
	}

	stsData := mongoDBStatefulSetData{
		Replicas:       replicas,
		ImageRepo:      instance.Spec.ImageRegistry,
		StorageClass:   storageclass,
		InitImage:      os.Getenv("IBM_MONGODB_INSTALL_IMAGE"),
		BootstrapImage: os.Getenv("IBM_MONGODB_IMAGE"),
		MetricsImage:   os.Getenv("IBM_MONGODB_EXPORTER_IMAGE"),
		CPULimit:       cpuLimit,
		CPURequest:     cpuRequest,
		MemoryLimit:    memoryLimit,
		MemoryRequest:  memoryRequest,
		NamespaceName:  instance.Namespace,
		StsLabels:      stsLabels,
		PodLabels:      podLabels,
		PVCSize:        PVCSizeRequest,
		UserID:         uid,
	}

	// Render the statefulset template and create-or-update it.
	var stsYaml bytes.Buffer
	t := template.Must(template.New("statefulset").Parse(statefulset))
	if err := t.Execute(&stsYaml, stsData); err != nil {
		return reconcile.Result{}, err
	}
	r.Log.Info("creating mongodb statefulset")
	if err := r.createUpdateFromYaml(instance, stsYaml.Bytes()); err != nil {
		return reconcile.Result{}, err
	}

	// Record the chosen storage class so later reconciles keep using it.
	instance.Status.StorageClass = storageclass
	if err := r.Client.Status().Update(context.TODO(), instance); err != nil {
		return reconcile.Result{}, err
	}

	// sign certificate
	// NOTE(review): this log message says root-ca-cert but creates the
	// god-issuer manifest — likely a copy/paste slip in the log text.
	r.Log.Info("creating root-ca-cert")
	if err := r.createFromYaml(instance, []byte(godIssuerYaml)); err != nil {
		r.Log.Error(err, "create god-issuer fail")
		return reconcile.Result{}, err
	}

	r.Log.Info("creating root-ca-cert")
	if err := r.createFromYaml(instance, []byte(rootCertYaml)); err != nil {
		r.Log.Error(err, "create root-ca-cert fail")
		return reconcile.Result{}, err
	}

	r.Log.Info("creating root-issuer")
	if err := r.createFromYaml(instance, []byte(rootIssuerYaml)); err != nil {
		r.Log.Error(err, "create root-issuer fail")
		return reconcile.Result{}, err
	}

	r.Log.Info("creating icp-mongodb-client-cert")
	if err := r.createFromYaml(instance, []byte(clientCertYaml)); err != nil {
		r.Log.Error(err, "create icp-mongodb-client-cert fail")
		return reconcile.Result{}, err
	}

	// Get the StatefulSet
	sts = &appsv1.StatefulSet{}
	if err = r.Client.Get(context.TODO(), types.NamespacedName{Name: "icp-mongodb", Namespace: instance.Namespace}, sts); err != nil {
		return reconcile.Result{}, err
	}

	// Add controller on PVC
	if err = r.addControlleronPVC(instance, sts); err != nil {
		return reconcile.Result{}, err
	}

	// Requeue until every replica is rolled out and ready.
	if sts.Status.UpdatedReplicas != sts.Status.Replicas || sts.Status.UpdatedReplicas != sts.Status.ReadyReplicas {
		r.Log.Info("Waiting Mongodb to be ready ...")
		return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
	}
	r.Log.Info("Mongodb is ready")

	return ctrl.Result{}, nil
}
// Move to separate file begin

// createFromYaml materialises a YAML manifest in the instance's namespace,
// makes the MongoDB CR its controller owner (so it is garbage-collected with
// the CR), and creates it. An AlreadyExists response is treated as success.
func (r *MongoDBReconciler) createFromYaml(instance *mongodbv1alpha1.MongoDB, yamlContent []byte) error {
	jsonSpec, err := yaml.YAMLToJSON(yamlContent)
	if err != nil {
		return fmt.Errorf("could not convert yaml to json: %v", err)
	}

	obj := &unstructured.Unstructured{}
	if err := obj.UnmarshalJSON(jsonSpec); err != nil {
		return fmt.Errorf("could not unmarshal resource: %v", err)
	}

	obj.SetNamespace(instance.Namespace)

	// Set CommonServiceConfig instance as the owner and controller
	if err := controllerutil.SetControllerReference(instance, obj, r.Scheme); err != nil {
		return err
	}

	if err := r.Client.Create(context.TODO(), obj); err != nil && !errors.IsAlreadyExists(err) {
		return fmt.Errorf("could not Create resource: %v", err)
	}

	return nil
}
// createUpdateFromYaml materialises a YAML manifest in the instance's
// namespace with the MongoDB CR as controller owner. It first attempts a
// Create; when the object already exists it falls back to an Update.
func (r *MongoDBReconciler) createUpdateFromYaml(instance *mongodbv1alpha1.MongoDB, yamlContent []byte) error {
	jsonSpec, err := yaml.YAMLToJSON(yamlContent)
	if err != nil {
		return fmt.Errorf("could not convert yaml to json: %v", err)
	}

	obj := &unstructured.Unstructured{}
	if err := obj.UnmarshalJSON(jsonSpec); err != nil {
		return fmt.Errorf("could not unmarshal resource: %v", err)
	}

	obj.SetNamespace(instance.Namespace)

	// Set CommonServiceConfig instance as the owner and controller
	if err := controllerutil.SetControllerReference(instance, obj, r.Scheme); err != nil {
		return err
	}

	err = r.Client.Create(context.TODO(), obj)
	if err == nil {
		return nil
	}
	if errors.IsAlreadyExists(err) {
		if err := r.Client.Update(context.TODO(), obj); err != nil {
			return fmt.Errorf("could not Update resource: %v", err)
		}
		return nil
	}
	return fmt.Errorf("could not Create resource: %v", err)
}
// getstorageclass picks a StorageClass for the mongodb PVCs: any class
// annotated as the cluster default wins; otherwise the first dynamically
// provisioned class is used. Classes with the no-provisioner (static)
// provisioner are never selected.
func (r *MongoDBReconciler) getstorageclass() (string, error) {
	scList := &storagev1.StorageClassList{}
	if err := r.Reader.List(context.TODO(), scList); err != nil {
		return "", err
	}
	if len(scList.Items) == 0 {
		return "", fmt.Errorf("could not find storage class in the cluster")
	}

	var defaultSC, nonDefaultSC []string
	for _, sc := range scList.Items {
		annotations := sc.ObjectMeta.GetAnnotations()
		if annotations["storageclass.kubernetes.io/is-default-class"] == "true" || annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
			defaultSC = append(defaultSC, sc.GetName())
			continue
		}
		// Static provisioners cannot satisfy the StatefulSet's volume claims.
		if sc.Provisioner == "kubernetes.io/no-provisioner" {
			continue
		}
		nonDefaultSC = append(nonDefaultSC, sc.GetName())
	}

	if len(defaultSC) > 0 {
		return defaultSC[0], nil
	}
	if len(nonDefaultSC) > 0 {
		return nonDefaultSC[0], nil
	}
	return "", fmt.Errorf("could not find dynamic provisioner storage class in the cluster nor is there a default storage class")
}
// addControlleronPVC adopts the PersistentVolumeClaims created for the
// StatefulSet (matched by the StatefulSet's labels): any PVC without an
// owner reference gets the MongoDB CR set as its controller, so the PVCs
// are garbage-collected when the CR is deleted.
func (r *MongoDBReconciler) addControlleronPVC(instance *mongodbv1alpha1.MongoDB, sts *appsv1.StatefulSet) error {
	// Fetch the list of the PersistentVolumeClaim generated by the StatefulSet
	pvcList := &corev1.PersistentVolumeClaimList{}
	if err := r.Client.List(context.TODO(), pvcList, &client.ListOptions{
		Namespace:     instance.Namespace,
		LabelSelector: labels.SelectorFromSet(sts.ObjectMeta.Labels),
	}); err != nil {
		return err
	}

	for i := range pvcList.Items {
		claim := &pvcList.Items[i]
		if claim.ObjectMeta.OwnerReferences != nil {
			// Already owned by something; leave it alone.
			continue
		}
		if err := controllerutil.SetControllerReference(instance, claim, r.Scheme); err != nil {
			return err
		}
		if err := r.Client.Update(context.TODO(), claim); err != nil {
			return err
		}
	}
	return nil
}
// Create Random String
//
// createRandomAlphaNumeric returns a cryptographically random string of the
// given length drawn from [a-zA-Z0-9]. It is used to generate the mongodb
// admin user name and password.
//
// BUG FIX: the previous implementation drew a single random index before the
// loop and reused it for every position, so every character of the "random"
// credential was identical. A fresh index is now drawn per character.
func createRandomAlphaNumeric(length int) string {
	const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	byteString := make([]byte, length)
	for i := range byteString {
		nbig, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset))))
		if err != nil {
			// crypto/rand failure is unrecoverable when generating secrets;
			// the old code would have nil-dereferenced here anyway.
			panic(err)
		}
		byteString[i] = charset[nbig.Int64()]
	}
	return string(byteString)
}
// Move to separate file?

// SetupWithManager registers the reconciler with the controller manager,
// watching the MongoDB custom resource and the StatefulSets, ConfigMaps,
// ServiceAccounts and Services it owns (changes to owned objects trigger a
// reconcile of the owning CR).
func (r *MongoDBReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&mongodbv1alpha1.MongoDB{}).
		Owns(&appsv1.StatefulSet{}).Owns(&corev1.ConfigMap{}).Owns(&corev1.ServiceAccount{}).
		Owns(&corev1.Service{}).
		Complete(r)
}
| [
"\"IBM_MONGODB_INSTALL_IMAGE\"",
"\"IBM_MONGODB_IMAGE\"",
"\"IBM_MONGODB_EXPORTER_IMAGE\""
]
| []
| [
"IBM_MONGODB_IMAGE",
"IBM_MONGODB_INSTALL_IMAGE",
"IBM_MONGODB_EXPORTER_IMAGE"
]
| [] | ["IBM_MONGODB_IMAGE", "IBM_MONGODB_INSTALL_IMAGE", "IBM_MONGODB_EXPORTER_IMAGE"] | go | 3 | 0 | |
main.go | package main
import (
"log"
"net"
"os"
"github.com/aau-claaudia/citen/ssh"
)
// main starts the citen SSH server. The listen address comes from the
// CITEN_LISTEN environment variable; when unset it defaults to an
// ephemeral port on localhost.
func main() {
	addr := os.Getenv("CITEN_LISTEN")
	if addr == "" {
		// default listen to localhost at a random port
		addr = "127.0.0.1:0"
	}

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("unable to listen for ssh traffic: %s", err)
	}
	log.Print("Listening on ", listener.Addr())

	server := &ssh.Server{}
	err = server.Serve(listener)
	// Serve only returns on error, so reaching here is always fatal.
	log.Fatalf("failed serving ssh: %s", err)
}
| [
"\"CITEN_LISTEN\""
]
| []
| [
"CITEN_LISTEN"
]
| [] | ["CITEN_LISTEN"] | go | 1 | 0 | |
src/runtime/runtime-gdb_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"bytes"
"fmt"
"go/build"
"internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
)
// checkGdbEnvironment skips the calling test on platforms where gdb-based
// debugging of Go binaries is known to be broken, and when GOROOT_FINAL is
// set but the tree has not yet been moved there.
func checkGdbEnvironment(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	switch runtime.GOOS {
	case "darwin":
		t.Skip("gdb does not work on darwin")
	case "netbsd":
		t.Skip("gdb does not work with threads on NetBSD; see golang.org/issue/22893 and gnats.netbsd.org/52548")
	case "windows":
		t.Skip("gdb tests fail on Windows: https://golang.org/issue/22687")
	case "linux":
		if runtime.GOARCH == "ppc64" {
			t.Skip("skipping gdb tests on linux/ppc64; see golang.org/issue/17366")
		}
	}
	if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
		t.Skip("gdb test can fail with GOROOT_FINAL pending")
	}
}
// checkGdbVersion skips the calling test when gdb is missing, its version
// cannot be parsed, or it is older than 7.7.
func checkGdbVersion(t *testing.T) {
	// Issue 11214 reports various failures with older versions of gdb.
	out, err := exec.Command("gdb", "--version").CombinedOutput()
	if err != nil {
		t.Skipf("skipping: error executing gdb: %v", err)
	}
	// First "major.minor" pair anywhere in the version banner.
	re := regexp.MustCompile(`([0-9]+)\.([0-9]+)`)
	matches := re.FindSubmatch(out)
	if len(matches) < 3 {
		t.Skipf("skipping: can't determine gdb version from\n%s\n", out)
	}
	major, err1 := strconv.Atoi(string(matches[1]))
	minor, err2 := strconv.Atoi(string(matches[2]))
	if err1 != nil || err2 != nil {
		t.Skipf("skipping: can't determine gdb version: %v, %v", err1, err2)
	}
	// Require gdb >= 7.7.
	if major < 7 || (major == 7 && minor < 7) {
		t.Skipf("skipping: gdb version %d.%d too old", major, minor)
	}
	t.Logf("gdb version %d.%d", major, minor)
}
// checkGdbPython skips the calling test when the installed gdb lacks
// working Python scripting support (required by runtime-gdb.py).
func checkGdbPython(t *testing.T) {
	if runtime.GOOS == "solaris" && testenv.Builder() != "solaris-amd64-smartosbuildlet" {
		t.Skip("skipping gdb python tests on solaris; see golang.org/issue/20821")
	}

	// Run a trivial Python one-liner inside gdb and check its output.
	cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
	out, err := cmd.CombinedOutput()

	if err != nil {
		t.Skipf("skipping due to issue running gdb: %v", err)
	}
	if strings.TrimSpace(string(out)) != "go gdb python support" {
		t.Skipf("skipping due to lack of python gdb support: %s", out)
	}
}
// helloSource is the body of the program debugged by testGdbPython.
// It deliberately omits the "package main" clause (and, in the cgo
// variant, the `import "C"` line): testGdbPython prepends those before
// writing the file. The END_OF_PROGRAM marker lets lastLine find the
// closing line of main so a breakpoint can be placed there.
const helloSource = `
import "fmt"
import "runtime"
var gslice []string
func main() {
mapvar := make(map[string]string, 13)
mapvar["abc"] = "def"
mapvar["ghi"] = "jkl"
strvar := "abc"
ptrvar := &strvar
slicevar := make([]string, 0, 16)
slicevar = append(slicevar, mapvar["abc"])
fmt.Println("hi")
runtime.KeepAlive(ptrvar)
_ = ptrvar
gslice = slicevar
runtime.KeepAlive(mapvar)
} // END_OF_PROGRAM
`
// lastLine returns the 0-based index of the first line of src that
// contains the END_OF_PROGRAM marker, or 0 if no line does.
func lastLine(src []byte) int {
	marker := []byte("END_OF_PROGRAM")
	lines := bytes.Split(src, []byte("\n"))
	for n := range lines {
		if bytes.Contains(lines[n], marker) {
			return n
		}
	}
	return 0
}
// TestGdbPython checks gdb's Go runtime support on a pure-Go binary.
func TestGdbPython(t *testing.T) {
	testGdbPython(t, false)
}
// TestGdbPythonCgo checks gdb's Go runtime support on a cgo-enabled
// binary. It is skipped as flaky on mips variants (issue 18784).
func TestGdbPythonCgo(t *testing.T) {
	if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" || runtime.GOARCH == "mips64" {
		testenv.SkipFlaky(t, 18784)
	}
	testGdbPython(t, true)
}
// testGdbPython builds a small test program (with cgo when cgo is
// true), debugs it with gdb in batch mode, and verifies that the
// runtime's auto-loaded Python support can print maps, strings, slices,
// local variables, and goroutine backtraces.
func testGdbPython(t *testing.T, cgo bool) {
	if cgo && !build.Default.CgoEnabled {
		t.Skip("skipping because cgo is not enabled")
	}
	checkGdbEnvironment(t)
	t.Parallel()
	checkGdbVersion(t)
	checkGdbPython(t)

	// Write the test program: the package clause (and cgo import when
	// requested) is prepended to helloSource.
	dir, err := ioutil.TempDir("", "go-build")
	if err != nil {
		t.Fatalf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(dir)

	var buf bytes.Buffer
	buf.WriteString("package main\n")
	if cgo {
		buf.WriteString(`import "C"` + "\n")
	}
	buf.WriteString(helloSource)
	src := buf.Bytes()
	err = ioutil.WriteFile(filepath.Join(dir, "main.go"), src, 0644)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	// Line number of the END_OF_PROGRAM marker, used for the second
	// breakpoint below.
	nLines := lastLine(src)

	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
	cmd.Dir = dir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("building source %v\n%s", err, out)
	}

	// Run gdb in batch mode. Each probe is bracketed with BEGIN/END
	// echo markers so its output can be extracted afterwards.
	args := []string{"-nx", "-q", "--batch",
		"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
		"-ex", "set startup-with-shell off",
		"-ex", "info auto-load python-scripts",
		"-ex", "set python print-stack full",
		"-ex", "br fmt.Println",
		"-ex", "run",
		"-ex", "echo BEGIN info goroutines\n",
		"-ex", "info goroutines",
		"-ex", "echo END\n",
		"-ex", "up", // up from fmt.Println to main
		"-ex", "echo BEGIN print mapvar\n",
		"-ex", "print mapvar",
		"-ex", "echo END\n",
		"-ex", "echo BEGIN print strvar\n",
		"-ex", "print strvar",
		"-ex", "echo END\n",
		"-ex", "echo BEGIN info locals\n",
		"-ex", "info locals",
		"-ex", "echo END\n",
		"-ex", "down", // back to fmt.Println (goroutine 2 below only works at bottom of stack. TODO: fix that)
		"-ex", "echo BEGIN goroutine 1 bt\n",
		"-ex", "goroutine 1 bt",
		"-ex", "echo END\n",
		"-ex", "echo BEGIN goroutine 2 bt\n",
		"-ex", "goroutine 2 bt",
		"-ex", "echo END\n",
		"-ex", "clear fmt.Println", // clear the previous break point
		"-ex", fmt.Sprintf("br main.go:%d", nLines), // new break point at the end of main
		"-ex", "c",
		"-ex", "echo BEGIN goroutine 1 bt at the end\n",
		"-ex", "goroutine 1 bt",
		"-ex", "echo END\n",
		filepath.Join(dir, "a.exe"),
	}
	got, _ := exec.Command("gdb", args...).CombinedOutput()
	t.Logf("gdb output: %s\n", got)

	// The runtime's gdb script announces itself on the first line; if
	// it did not load, none of the probes below can work.
	firstLine := bytes.SplitN(got, []byte("\n"), 2)[0]
	if string(firstLine) != "Loading Go Runtime support." {
		// This can happen when using all.bash with
		// GOROOT_FINAL set, because the tests are run before
		// the final installation of the files.
		cmd := exec.Command(testenv.GoToolPath(t), "env", "GOROOT")
		cmd.Env = []string{}
		out, err := cmd.CombinedOutput()
		if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) {
			t.Skipf("skipping because GOROOT=%s does not exist", runtime.GOROOT())
		}
		_, file, _, _ := runtime.Caller(1)
		t.Logf("package testing source file: %s", file)
		t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got)
	}

	// Extract named BEGIN...END blocks from output
	partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`)
	blocks := map[string]string{}
	for _, subs := range partRe.FindAllSubmatch(got, -1) {
		blocks[string(subs[1])] = string(subs[2])
	}

	infoGoroutinesRe := regexp.MustCompile(`\*\s+\d+\s+running\s+`)
	if bl := blocks["info goroutines"]; !infoGoroutinesRe.MatchString(bl) {
		t.Fatalf("info goroutines failed: %s", bl)
	}

	// Map iteration order is unspecified, so accept either key order.
	printMapvarRe1 := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`)
	printMapvarRe2 := regexp.MustCompile(`\Q = map[string]string = {["ghi"] = "jkl", ["abc"] = "def"}\E$`)
	if bl := blocks["print mapvar"]; !printMapvarRe1.MatchString(bl) &&
		!printMapvarRe2.MatchString(bl) {
		t.Fatalf("print mapvar failed: %s", bl)
	}

	strVarRe := regexp.MustCompile(`\Q = "abc"\E$`)
	if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) {
		t.Fatalf("print strvar failed: %s", bl)
	}

	// The exact format of composite values has changed over time.
	// For issue 16338: ssa decompose phase split a slice into
	// a collection of scalar vars holding its fields. In such cases
	// the DWARF variable location expression should be of the
	// form "var.field" and not just "field".
	// However, the newer dwarf location list code reconstituted
	// aggregates from their fields and reverted their printing
	// back to its original form.
	infoLocalsRe := regexp.MustCompile(`slicevar *= *\[\]string *= *{"def"}`)
	if bl := blocks["info locals"]; !infoLocalsRe.MatchString(bl) {
		t.Fatalf("info locals failed: %s", bl)
	}

	btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?fmt\.Println.+at`)
	if bl := blocks["goroutine 1 bt"]; !btGoroutine1Re.MatchString(bl) {
		t.Fatalf("goroutine 1 bt failed: %s", bl)
	}

	btGoroutine2Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?runtime.+at`)
	if bl := blocks["goroutine 2 bt"]; !btGoroutine2Re.MatchString(bl) {
		t.Fatalf("goroutine 2 bt failed: %s", bl)
	}

	// After continuing to the end of main, goroutine 1 must be stopped
	// in main.main itself.
	btGoroutine1AtTheEndRe := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?main\.main.+at`)
	if bl := blocks["goroutine 1 bt at the end"]; !btGoroutine1AtTheEndRe.MatchString(bl) {
		t.Fatalf("goroutine 1 bt at the end failed: %s", bl)
	}
}
// backtraceSource is a chain of //go:noinline functions ending in an
// indirect call through the package-level variable f. TestGdbBacktrace
// uses it to check that gdb can unwind the resulting stack using only
// DWARF debug info.
const backtraceSource = `
package main
//go:noinline
func aaa() bool { return bbb() }
//go:noinline
func bbb() bool { return ccc() }
//go:noinline
func ccc() bool { return ddd() }
//go:noinline
func ddd() bool { return f() }
//go:noinline
func eee() bool { return true }
var f = eee
func main() {
_ = aaa()
}
`
// TestGdbBacktrace tests that gdb can unwind the stack correctly
// using only the DWARF debug info.
func TestGdbBacktrace(t *testing.T) {
	if runtime.GOOS == "netbsd" {
		testenv.SkipFlaky(t, 15603)
	}
	checkGdbEnvironment(t)
	t.Parallel()
	checkGdbVersion(t)

	dir, err := ioutil.TempDir("", "go-build")
	if err != nil {
		t.Fatalf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Build the source code.
	src := filepath.Join(dir, "main.go")
	err = ioutil.WriteFile(src, []byte(backtraceSource), 0644)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
	cmd.Dir = dir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("building source %v\n%s", err, out)
	}

	// Execute gdb commands: break in the innermost function and print
	// the backtrace from there.
	args := []string{"-nx", "-batch",
		"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
		"-ex", "set startup-with-shell off",
		"-ex", "break main.eee",
		"-ex", "run",
		"-ex", "backtrace",
		"-ex", "continue",
		filepath.Join(dir, "a.exe"),
	}
	got, _ := exec.Command("gdb", args...).CombinedOutput()

	// Check that the backtrace matches the source code: each caller
	// must appear at the expected frame number.
	bt := []string{
		"eee",
		"ddd",
		"ccc",
		"bbb",
		"aaa",
		"main",
	}
	for i, name := range bt {
		s := fmt.Sprintf("#%v.*main\\.%v", i, name)
		re := regexp.MustCompile(s)
		if found := re.Find(got) != nil; !found {
			t.Errorf("could not find '%v' in backtrace", s)
			t.Fatalf("gdb output:\n%v", string(got))
		}
	}
}
// autotmpTypeSource stores composite values in interfaces so the
// compiler materializes autotmp variables whose types must be recorded
// in .debug_info (checked by TestGdbAutotmpTypes).
const autotmpTypeSource = `
package main
type astruct struct {
a, b int
}
func main() {
var iface interface{} = map[string]astruct{}
var iface2 interface{} = []astruct{}
println(iface, iface2)
}
`
// TestGdbAutotmpTypes ensures that types of autotmp variables appear in .debug_info
// See bug #17830.
func TestGdbAutotmpTypes(t *testing.T) {
	checkGdbEnvironment(t)
	t.Parallel()
	checkGdbVersion(t)

	dir, err := ioutil.TempDir("", "go-build")
	if err != nil {
		t.Fatalf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Build the source code with optimizations and inlining disabled
	// (-N -l) so the autotmp variables survive into the binary's DWARF.
	src := filepath.Join(dir, "main.go")
	err = ioutil.WriteFile(src, []byte(autotmpTypeSource), 0644)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=all=-N -l", "-o", "a.exe")
	cmd.Dir = dir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("building source %v\n%s", err, out)
	}

	// Execute gdb commands.
	args := []string{"-nx", "-batch",
		"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
		"-ex", "set startup-with-shell off",
		"-ex", "break main.main",
		"-ex", "run",
		"-ex", "step",
		"-ex", "info types astruct",
		filepath.Join(dir, "a.exe"),
	}
	got, _ := exec.Command("gdb", args...).CombinedOutput()
	sgot := string(got)

	// Check that the expected autotmp-related types all appear in the
	// output of "info types astruct".
	types := []string{
		"struct []main.astruct;",
		"struct bucket<string,main.astruct>;",
		"struct hash<string,main.astruct>;",
		"struct main.astruct;",
		"typedef struct hash<string,main.astruct> * map[string]main.astruct;",
	}
	for _, name := range types {
		if !strings.Contains(sgot, name) {
			// Bug fix: the message previously misspelled the gdb
			// command as "info typrs astruct".
			t.Errorf("could not find %s in 'info types astruct' output", name)
			t.Fatalf("gdb output:\n%v", sgot)
		}
	}
}
// constsSource declares typed constants spanning the extremes gdb must
// print correctly (a plain int, a max uint64, and a negative int64);
// TestGdbConst checks their printed values.
const constsSource = `
package main
const aConstant int = 42
const largeConstant uint64 = ^uint64(0)
const minusOne int64 = -1
func main() {
println("hello world")
}
`
// TestGdbConst checks that gdb can print the values of Go constants,
// both from the test program (constsSource) and from the runtime
// package itself.
func TestGdbConst(t *testing.T) {
	checkGdbEnvironment(t)
	t.Parallel()
	checkGdbVersion(t)

	dir, err := ioutil.TempDir("", "go-build")
	if err != nil {
		t.Fatalf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Build the source code without optimizations or inlining.
	src := filepath.Join(dir, "main.go")
	err = ioutil.WriteFile(src, []byte(constsSource), 0644)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=all=-N -l", "-o", "a.exe")
	cmd.Dir = dir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("building source %v\n%s", err, out)
	}

	// Execute gdb commands.
	args := []string{"-nx", "-batch",
		"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
		"-ex", "set startup-with-shell off",
		"-ex", "break main.main",
		"-ex", "run",
		"-ex", "print main.aConstant",
		"-ex", "print main.largeConstant",
		"-ex", "print main.minusOne",
		"-ex", "print 'runtime._MSpanInUse'",
		"-ex", "print 'runtime._PageSize'",
		filepath.Join(dir, "a.exe"),
	}
	got, _ := exec.Command("gdb", args...).CombinedOutput()
	// Normalize line endings so the comparison below works on all
	// platforms.
	sgot := strings.Replace(string(got), "\r\n", "\n", -1)
	t.Logf("output %q", sgot)

	// $1..$5 are gdb's value-history entries for the five prints above.
	if !strings.Contains(sgot, "\n$1 = 42\n$2 = 18446744073709551615\n$3 = -1\n$4 = 1 '\\001'\n$5 = 8192") {
		t.Fatalf("output mismatch")
	}
}
// panicSource panics with traceback mode "crash", which turns the
// panic into a SIGABRT; TestGdbPanic checks that gdb can unwind the
// resulting stack.
const panicSource = `
package main
import "runtime/debug"
func main() {
debug.SetTraceback("crash")
crash()
}
func crash() {
panic("panic!")
}
`
// TestGdbPanic tests that gdb can unwind the stack correctly
// from SIGABRTs from Go panics.
func TestGdbPanic(t *testing.T) {
	checkGdbEnvironment(t)
	t.Parallel()
	checkGdbVersion(t)

	dir, err := ioutil.TempDir("", "go-build")
	if err != nil {
		t.Fatalf("failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Build the source code.
	src := filepath.Join(dir, "main.go")
	err = ioutil.WriteFile(src, []byte(panicSource), 0644)
	if err != nil {
		t.Fatalf("failed to create file: %v", err)
	}
	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe")
	cmd.Dir = dir
	out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
	if err != nil {
		t.Fatalf("building source %v\n%s", err, out)
	}

	// Execute gdb commands: run until the SIGABRT, then backtrace.
	args := []string{"-nx", "-batch",
		"-iex", "add-auto-load-safe-path " + filepath.Join(runtime.GOROOT(), "src", "runtime"),
		"-ex", "set startup-with-shell off",
		"-ex", "run",
		"-ex", "backtrace",
		filepath.Join(dir, "a.exe"),
	}
	got, _ := exec.Command("gdb", args...).CombinedOutput()

	// Check that the backtrace matches the source code.
	bt := []string{
		`crash`,
		`main`,
	}
	for _, name := range bt {
		s := fmt.Sprintf("#.* .* in main\\.%v", name)
		re := regexp.MustCompile(s)
		if found := re.Find(got) != nil; !found {
			t.Errorf("could not find '%v' in backtrace", s)
			t.Fatalf("gdb output:\n%v", string(got))
		}
	}
}
| [
"\"GOROOT_FINAL\""
]
| []
| [
"GOROOT_FINAL"
]
| [] | ["GOROOT_FINAL"] | go | 1 | 0 | |
ironic/tests/unit/common/test_utils.py | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import hashlib
import os
import os.path
import shutil
import tempfile
import jinja2
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import netutils
import six
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
# Module-level handle to the global oslo.config configuration object.
CONF = cfg.CONF
class BareMetalUtilsTestCase(base.TestCase):
    """Tests for the symlink helper in ironic.common.utils."""

    @mock.patch.object(os, "symlink", autospec=True)
    def test_create_link(self, symlink_mock):
        # Successful symlink creation: os.symlink is called exactly once
        # with the expected source and link paths.
        symlink_mock.return_value = None
        utils.create_link_without_raise("/fake/source", "/fake/link")
        symlink_mock.assert_called_once_with("/fake/source", "/fake/link")

    @mock.patch.object(os, "symlink", autospec=True)
    def test_create_link_EEXIST(self, symlink_mock):
        # An OSError from os.symlink must not propagate out of
        # create_link_without_raise.
        symlink_mock.side_effect = OSError(errno.EEXIST)
        utils.create_link_without_raise("/fake/source", "/fake/link")
        symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
class ExecuteTestCase(base.TestCase):
    """Tests for utils.execute(), a wrapper over processutils.execute.

    These tests mock processutils.execute and assert on the exact
    keyword arguments the wrapper forwards.
    """

    @mock.patch.object(processutils, 'execute', autospec=True)
    @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        # With no caller-supplied env, use_standard_locale injects
        # LC_ALL=C into a copy of os.environ (mocked empty here).
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        # LC_ALL=C is merged into, not substituted for, the caller's
        # env_variables.
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_not_use_standard_locale(self, execute_mock):
        # Without use_standard_locale, env_variables pass through
        # untouched.
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    def test_execute_get_root_helper(self):
        # run_as_root=True adds the root helper computed by
        # utils._get_root_helper().
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            helper = utils._get_root_helper()
            utils.execute('foo', run_as_root=True)
            execute_mock.assert_called_once_with('foo', run_as_root=True,
                                                 root_helper=helper)

    def test_execute_without_root_helper(self):
        # run_as_root=False must not attach a root_helper argument.
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            utils.execute('foo', run_as_root=False)
            execute_mock.assert_called_once_with('foo', run_as_root=False)
class GenericUtilsTestCase(base.TestCase):
    """Tests for assorted helpers in ironic.common.utils: hashing,
    identifier validation, string utilities, and deprecation warnings.
    """

    @mock.patch.object(utils, 'hashlib', autospec=True)
    def test__get_hash_object(self, hashlib_mock):
        # Each supported algorithm name must dispatch to the matching
        # hashlib constructor.
        algorithms_available = ('md5', 'sha1', 'sha224',
                                'sha256', 'sha384', 'sha512')
        hashlib_mock.algorithms_guaranteed = algorithms_available
        hashlib_mock.algorithms = algorithms_available
        # | WHEN |
        utils._get_hash_object('md5')
        utils._get_hash_object('sha1')
        utils._get_hash_object('sha224')
        utils._get_hash_object('sha256')
        utils._get_hash_object('sha384')
        utils._get_hash_object('sha512')
        # | THEN |
        calls = [mock.call.md5(), mock.call.sha1(), mock.call.sha224(),
                 mock.call.sha256(), mock.call.sha384(), mock.call.sha512()]
        hashlib_mock.assert_has_calls(calls)

    def test__get_hash_object_throws_for_invalid_or_unsupported_hash_name(
            self):
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue,
                          utils._get_hash_object,
                          'hickory-dickory-dock')

    def test_hash_file_for_md5(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.md5(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object)  # using default, 'md5'
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_md5_not_binary(self):
        # Text-mode input: hash_file must hash the UTF-8 encoding.
        # | GIVEN |
        data = u'Mary had a little lamb, its fleece as white as sno\u0449'
        file_like_object = six.StringIO(data)
        expected = hashlib.md5(data.encode('utf-8')).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object)  # using default, 'md5'
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha1(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha1(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha1')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha512(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha512(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha512')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_throws_for_invalid_or_unsupported_hash(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue, utils.hash_file,
                          file_like_object, 'hickory-dickory-dock')

    def test_file_has_content_equal(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        ref = data
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertTrue(utils.file_has_content('foo', ref))
            # The file must be opened in binary mode.
            mopen.assert_called_once_with('foo', 'rb')

    def test_file_has_content_equal_not_binary(self):
        data = u'Mary had a little lamb, its fleece as white as sno\u0449'
        ref = data
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertTrue(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_file_has_content_differ(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        # One trailing byte of difference is enough to report a mismatch.
        ref = data + b'!'
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertFalse(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_is_valid_datapath_id(self):
        # A valid datapath id is exactly 16 hex digits, any case.
        self.assertTrue(utils.is_valid_datapath_id("525400cf2d319fdf"))
        self.assertTrue(utils.is_valid_datapath_id("525400CF2D319FDF"))
        self.assertFalse(utils.is_valid_datapath_id("52"))
        self.assertFalse(utils.is_valid_datapath_id("52:54:00:cf:2d:31"))
        self.assertFalse(utils.is_valid_datapath_id("notadatapathid00"))
        self.assertFalse(utils.is_valid_datapath_id("5525400CF2D319FDF"))

    def test_is_hostname_safe(self):
        self.assertTrue(utils.is_hostname_safe('spam'))
        # Uppercase is rejected for plain hostnames.
        self.assertFalse(utils.is_hostname_safe('spAm'))
        self.assertFalse(utils.is_hostname_safe('SPAM'))
        # Hyphens must be interior, not leading/trailing.
        self.assertFalse(utils.is_hostname_safe('-spam'))
        self.assertFalse(utils.is_hostname_safe('spam-'))
        self.assertTrue(utils.is_hostname_safe('spam-eggs'))
        self.assertFalse(utils.is_hostname_safe('spam_eggs'))
        self.assertFalse(utils.is_hostname_safe('spam eggs'))
        self.assertTrue(utils.is_hostname_safe('spam.eggs'))
        self.assertTrue(utils.is_hostname_safe('9spam'))
        self.assertTrue(utils.is_hostname_safe('spam7'))
        self.assertTrue(utils.is_hostname_safe('br34kf4st'))
        self.assertFalse(utils.is_hostname_safe('$pam'))
        self.assertFalse(utils.is_hostname_safe('egg$'))
        self.assertFalse(utils.is_hostname_safe('spam#eggs'))
        self.assertFalse(utils.is_hostname_safe(' eggs'))
        self.assertFalse(utils.is_hostname_safe('spam '))
        self.assertTrue(utils.is_hostname_safe('s'))
        # Single labels max out at 63 characters.
        self.assertTrue(utils.is_hostname_safe('s' * 63))
        self.assertFalse(utils.is_hostname_safe('s' * 64))
        self.assertFalse(utils.is_hostname_safe(''))
        self.assertFalse(utils.is_hostname_safe(None))
        # Need to ensure a binary response for success or fail
        self.assertIsNotNone(utils.is_hostname_safe('spam'))
        self.assertIsNotNone(utils.is_hostname_safe('-spam'))
        # Multi-label names, including trailing-dot FQDNs and
        # underscores in interior labels.
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com'))
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com.'))
        self.assertTrue(utils.is_hostname_safe('http._sctp.www.example.com'))
        self.assertTrue(utils.is_hostname_safe('mail.pets_r_us.net'))
        self.assertTrue(utils.is_hostname_safe('mail-server-15.my_host.org'))
        self.assertFalse(utils.is_hostname_safe('www.nothere.com_'))
        self.assertFalse(utils.is_hostname_safe('www.nothere_.com'))
        self.assertFalse(utils.is_hostname_safe('www..nothere.com'))
        # Total-length boundary: 255 characters overall.
        long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
        self.assertTrue(utils.is_hostname_safe(long_str))
        self.assertFalse(utils.is_hostname_safe(long_str + '.'))
        self.assertFalse(utils.is_hostname_safe('a' * 255))

    def test_is_valid_logical_name(self):
        # Logical names are more permissive than hostnames: mixed case,
        # underscores, tildes, and leading dots are allowed.
        valid = (
            'spam', 'spAm', 'SPAM', 'spam-eggs', 'spam.eggs', 'spam_eggs',
            'spam~eggs', '9spam', 'spam7', '~spam', '.spam', '.~-_', '~',
            'br34kf4st', 's', 's' * 63, 's' * 255)
        invalid = (
            ' ', 'spam eggs', '$pam', 'egg$', 'spam#eggs',
            ' eggs', 'spam ', '', None, 'spam%20')
        for hostname in valid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for success. assertTrue
            # is too generous, and would pass this test if, for
            # instance, a regex Match object were returned.
            self.assertIs(result, True,
                          "%s is unexpectedly invalid" % hostname)
        for hostname in invalid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for
            # success. assertFalse is too generous and would pass this
            # test if None were returned.
            self.assertIs(result, False,
                          "%s is unexpectedly valid" % hostname)

    def test_validate_and_normalize_mac(self):
        # Valid MACs are normalized to lowercase.
        mac = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            self.assertEqual(mac.lower(),
                             utils.validate_and_normalize_mac(mac))

    def test_validate_and_normalize_datapath_id(self):
        # Valid datapath ids are normalized to lowercase.
        datapath_id = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(utils, 'is_valid_datapath_id',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            self.assertEqual(datapath_id.lower(),
                             utils.validate_and_normalize_datapath_id(
                                 datapath_id))

    def test_validate_and_normalize_mac_invalid_format(self):
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = False
            self.assertRaises(exception.InvalidMAC,
                              utils.validate_and_normalize_mac, 'invalid-mac')

    def test_safe_rstrip(self):
        value = '/test/'
        rstripped_value = '/test'
        # A bare separator is left alone rather than stripped to ''.
        not_rstripped = '/'
        self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
        self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))

    def test_safe_rstrip_not_raises_exceptions(self):
        # Supplying an integer should normally raise an exception because it
        # does not have the rstrip() method.
        value = 10
        # In the case of raising an exception safe_rstrip() should return the
        # original value.
        self.assertEqual(value, utils.safe_rstrip(value))

    @mock.patch.object(os.path, 'getmtime', return_value=1439465889.4964755,
                       autospec=True)
    def test_unix_file_modification_datetime(self, mtime_mock):
        # The float mtime is converted to a naive datetime with
        # microsecond precision.
        expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
        self.assertEqual(expected,
                         utils.unix_file_modification_datetime('foo'))
        mtime_mock.assert_called_once_with('foo')

    def test_is_valid_no_proxy(self):
        # Valid values for 'no_proxy'
        valid_no_proxy = [
            ('a' * 63 + '.' + '0' * 63 + '.c.' + 'd' * 61 + '.' + 'e' * 61),
            ('A' * 63 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' + 'E' * 61),
            ('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),
            ',,example.com:3128,',
            '192.168.1.1',  # IP should be valid
        ]
        # Test each one individually, so if failure easier to determine which
        # one failed.
        for no_proxy in valid_no_proxy:
            self.assertTrue(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be valid: {}".format(no_proxy))
        # Test valid when joined together
        self.assertTrue(utils.is_valid_no_proxy(','.join(valid_no_proxy)))
        # Test valid when joined together with whitespace
        self.assertTrue(utils.is_valid_no_proxy(' , '.join(valid_no_proxy)))
        # empty string should also be valid
        self.assertTrue(utils.is_valid_no_proxy(''))
        # Invalid values for 'no_proxy'
        invalid_no_proxy = [
            ('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.'
             + 'E' * 61),  # too long (> 253)
            ('a' * 100),
            'a..com',
            ('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),  # too long (> 251 after deleting .)
            ('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),  # starts with *.
            'c.-a.com',
            'c.a-.com',
        ]
        for no_proxy in invalid_no_proxy:
            self.assertFalse(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be invalid: {}".format(no_proxy))

    @mock.patch.object(utils, 'LOG', autospec=True)
    def test_warn_about_deprecated_extra_vif_port_id(self, mock_log):
        # The deprecation warning must be emitted only once per process,
        # even across repeated calls.
        # Set variable to default value
        utils.warn_deprecated_extra_vif_port_id = False
        utils.warn_about_deprecated_extra_vif_port_id()
        utils.warn_about_deprecated_extra_vif_port_id()
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn("extra['vif_port_id'] is deprecated and will not",
                      mock_log.warning.call_args[0][0])
class TempFilesTestCase(base.TestCase):
    """Tests for the tempdir context manager and the check_dir helpers
    (existence, writability, and free-space checks).
    """

    def test_tempdir(self):
        # The directory exists inside the context and is removed on exit.
        dirname = None
        with utils.tempdir() as tempdir:
            self.assertTrue(os.path.isdir(tempdir))
            dirname = tempdir
        self.assertFalse(os.path.exists(dirname))

    @mock.patch.object(shutil, 'rmtree', autospec=True)
    @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
    def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
        # kwargs are forwarded to mkdtemp; the created dir is passed to
        # rmtree on exit.
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        kwargs = {'dir': 'b'}
        with utils.tempdir(**kwargs) as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        mkdtemp_mock.assert_called_once_with(**kwargs)
        rmtree_mock.assert_called_once_with(tempdir_created)

    @mock.patch.object(utils, 'LOG', autospec=True)
    @mock.patch.object(shutil, 'rmtree', autospec=True)
    @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
    def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
                                            log_mock):
        # Cleanup failures are logged, not raised.
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        rmtree_mock.side_effect = OSError
        with utils.tempdir() as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        rmtree_mock.assert_called_once_with(tempdir_created)
        self.assertTrue(log_mock.error.called)

    @mock.patch.object(os.path, 'exists', autospec=True)
    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_with_pass_in(self, mock_free_space, mock_dir_writable,
                                    mock_exists):
        mock_exists.return_value = True
        # test passing in a directory and size
        utils.check_dir(directory_to_check='/fake/path', required_space=5)
        mock_exists.assert_called_once_with('/fake/path')
        mock_dir_writable.assert_called_once_with('/fake/path')
        mock_free_space.assert_called_once_with('/fake/path', 5)

    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_no_dir(self, mock_free_space, mock_dir_writable):
        self.config(tempdir='/fake/path')
        # NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
        # on the method level.
        with mock.patch.object(os.path, 'exists',
                               autospec=True) as mock_exists:
            mock_exists.return_value = False
            self.assertRaises(exception.PathNotFound, utils.check_dir)
            mock_exists.assert_called_once_with(CONF.tempdir)
        # When the path is missing, the later checks must not run.
        self.assertFalse(mock_free_space.called)
        self.assertFalse(mock_dir_writable.called)

    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_ok(self, mock_free_space, mock_dir_writable):
        self.config(tempdir='/fake/path')
        # NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
        # on the method level.
        with mock.patch.object(os.path, 'exists',
                               autospec=True) as mock_exists:
            mock_exists.return_value = True
            utils.check_dir()
            mock_exists.assert_called_once_with(CONF.tempdir)
        mock_dir_writable.assert_called_once_with(CONF.tempdir)
        # Default required space is 1 MiB.
        mock_free_space.assert_called_once_with(CONF.tempdir, 1)

    @mock.patch.object(os, 'access', autospec=True)
    def test__check_dir_writable_ok(self, mock_access):
        mock_access.return_value = True
        self.assertIsNone(utils._check_dir_writable("/fake/path"))
        mock_access.assert_called_once_with("/fake/path", os.W_OK)

    @mock.patch.object(os, 'access', autospec=True)
    def test__check_dir_writable_not_writable(self, mock_access):
        mock_access.return_value = False
        self.assertRaises(exception.DirectoryNotWritable,
                          utils._check_dir_writable, "/fake/path")
        mock_access.assert_called_once_with("/fake/path", os.W_OK)

    @mock.patch.object(os, 'statvfs', autospec=True)
    def test__check_dir_free_space_ok(self, mock_stat):
        # 5-byte blocks * 1M available blocks: plenty of space.
        statvfs_mock_return = mock.MagicMock()
        statvfs_mock_return.f_bsize = 5
        statvfs_mock_return.f_frsize = 0
        statvfs_mock_return.f_blocks = 0
        statvfs_mock_return.f_bfree = 0
        statvfs_mock_return.f_bavail = 1024 * 1024
        statvfs_mock_return.f_files = 0
        statvfs_mock_return.f_ffree = 0
        statvfs_mock_return.f_favail = 0
        statvfs_mock_return.f_flag = 0
        statvfs_mock_return.f_namemax = 0
        mock_stat.return_value = statvfs_mock_return
        utils._check_dir_free_space("/fake/path")
        mock_stat.assert_called_once_with("/fake/path")

    @mock.patch.object(os, 'statvfs', autospec=True)
    def test_check_dir_free_space_raises(self, mock_stat):
        # Only 1 KiB available: below the required minimum.
        statvfs_mock_return = mock.MagicMock()
        statvfs_mock_return.f_bsize = 1
        statvfs_mock_return.f_frsize = 0
        statvfs_mock_return.f_blocks = 0
        statvfs_mock_return.f_bfree = 0
        statvfs_mock_return.f_bavail = 1024
        statvfs_mock_return.f_files = 0
        statvfs_mock_return.f_ffree = 0
        statvfs_mock_return.f_favail = 0
        statvfs_mock_return.f_flag = 0
        statvfs_mock_return.f_namemax = 0
        mock_stat.return_value = statvfs_mock_return
        self.assertRaises(exception.InsufficientDiskSpace,
                          utils._check_dir_free_space, "/fake/path")
        mock_stat.assert_called_once_with("/fake/path")
class GetUpdatedCapabilitiesTestCase(base.TestCase):
    """Tests for utils.get_updated_capabilities (merging capability
    dicts into the comma-separated "k:v" string format).

    NOTE(review): the validate_network_port tests at the bottom look
    misplaced in this class — consider moving them; verify before doing so.
    """

    def test_get_updated_capabilities(self):
        capabilities = {'ilo_firmware_version': 'xyz'}
        cap_string = 'ilo_firmware_version:xyz'
        cap_returned = utils.get_updated_capabilities(None, capabilities)
        self.assertEqual(cap_string, cap_returned)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_multiple_keys(self):
        # Pair order in the result is unspecified, so compare as sets.
        capabilities = {'ilo_firmware_version': 'xyz',
                        'foo': 'bar', 'somekey': 'value'}
        cap_string = 'ilo_firmware_version:xyz,foo:bar,somekey:value'
        cap_returned = utils.get_updated_capabilities(None, capabilities)
        set1 = set(cap_string.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_invalid_capabilities(self):
        # A string without "k:v" structure is rejected.
        capabilities = 'ilo_firmware_version'
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          capabilities, {})

    def test_get_updated_capabilities_capabilities_not_dict(self):
        # The new capabilities must be a dict, not a list of pairs.
        capabilities = ['ilo_firmware_version:xyz', 'foo:bar']
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          None, capabilities)

    def test_get_updated_capabilities_add_to_existing_capabilities(self):
        new_capabilities = {'BootMode': 'uefi'}
        expected_capabilities = 'BootMode:uefi,foo:bar'
        cap_returned = utils.get_updated_capabilities('foo:bar',
                                                      new_capabilities)
        set1 = set(expected_capabilities.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_replace_to_existing_capabilities(self):
        # An existing key is overwritten by the new value.
        new_capabilities = {'BootMode': 'bios'}
        expected_capabilities = 'BootMode:bios'
        cap_returned = utils.get_updated_capabilities('BootMode:uefi',
                                                      new_capabilities)
        set1 = set(expected_capabilities.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    def test_validate_network_port(self):
        # Boundary values 0 and 65535 are accepted and returned as ints.
        port = utils.validate_network_port('0', 'message')
        self.assertEqual(0, port)
        port = utils.validate_network_port('65535')
        self.assertEqual(65535, port)

    def test_validate_network_port_fail(self):
        # Out-of-range and non-numeric ports raise, with the optional
        # parameter name included in the message.
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "65536" is not a valid port.',
                               utils.validate_network_port,
                               '65536')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'fake_port "-1" is not a valid port.',
                               utils.validate_network_port,
                               '-1',
                               'fake_port')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "invalid" is not a valid port.',
                               utils.validate_network_port,
                               'invalid')
class JinjaTemplatingTestCase(base.TestCase):
    """Tests for utils.render_template (string and file template sources)."""
    def setUp(self):
        super(JinjaTemplatingTestCase, self).setUp()
        self.template = '{{ foo }} {{ bar }}'
        self.params = {'foo': 'spam', 'bar': 'ham'}
        self.expected = 'spam ham'
    def test_render_string(self):
        # is_file=False: the first argument is the template text itself.
        self.assertEqual(self.expected,
                         utils.render_template(self.template,
                                               self.params,
                                               is_file=False))
    @mock.patch('ironic.common.utils.jinja2.FileSystemLoader', autospec=True)
    def test_render_file(self, jinja_fsl_mock):
        # Default (file) mode: the loader is patched so no real file is read;
        # the loader must be constructed with the template's directory.
        path = '/path/to/template.j2'
        jinja_fsl_mock.return_value = jinja2.DictLoader(
            {'template.j2': self.template})
        self.assertEqual(self.expected,
                         utils.render_template(path,
                                               self.params))
        jinja_fsl_mock.assert_called_once_with('/path/to')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
SampleEncodeMultiThread.py | #
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os

if os.name == 'nt':
    # Add CUDA_PATH env variable.
    # Bug fix: use .get() — the original os.environ["CUDA_PATH"] raised
    # KeyError when the variable was unset, so the friendly error message
    # below could never be printed.
    cuda_path = os.environ.get("CUDA_PATH")
    if cuda_path:
        os.add_dll_directory(cuda_path)
    else:
        print("CUDA_PATH environment variable is not set.", file = sys.stderr)
        print("Can't set CUDA DLLs search path.", file = sys.stderr)
        exit(1)

    # Add PATH as well for minor CUDA releases
    sys_path = os.environ.get("PATH")
    if sys_path:
        paths = sys_path.split(';')
        for path in paths:
            if os.path.isdir(path):
                os.add_dll_directory(path)
    else:
        print("PATH environment variable is not set.", file = sys.stderr)
        exit(1)
import PyNvCodec as nvc
import numpy as np
from threading import Thread
class Worker(Thread):
    """
    Encode one raw YUV 4:2:0 file to an H.264 elementary stream on one GPU.

    Each worker owns its own uploader, color converter and encoder so that
    several instances can run concurrently (possibly on different GPUs).
    """
    def __init__(self, gpuID, width, height, rawFilePath, encFilePath):
        """
        :param gpuID: ordinal of the GPU to encode on
        :param width: frame width in pixels (numeric string)
        :param height: frame height in pixels (numeric string)
        :param rawFilePath: path to the input raw YUV 4:2:0 file
        :param encFilePath: path for the output H.264 stream
        """
        Thread.__init__(self)
        res = width + 'x' + height
        # Host-frame -> GPU-surface uploader (YUV420 layout).
        self.nvUpl = nvc.PyFrameUploader(int(width), int(height), nvc.PixelFormat.YUV420, gpuID)
        # The encoder consumes NV12, so convert YUV420 -> NV12 on the GPU.
        self.nvCvt = nvc.PySurfaceConverter(int(width), int(height), nvc.PixelFormat.YUV420, nvc.PixelFormat.NV12, gpuID)
        self.nvEnc = nvc.PyNvEncoder({'preset': 'hq', 'codec': 'h264', 's': res}, gpuID)
        self.encFile = open(encFilePath, "wb")
        self.rawFile = open(rawFilePath, "rb")
    def run(self):
        try:
            while True:
                # One YUV 4:2:0 frame occupies width * height * 3/2 bytes.
                frameSize = self.nvEnc.Width() * self.nvEnc.Height() * 3 / 2
                rawFrame = np.fromfile(self.rawFile, np.uint8, count = int(frameSize))
                if not (rawFrame.size):
                    print('No more video frames')
                    break
                rawSurface = self.nvUpl.UploadSingleFrame(rawFrame)
                if (rawSurface.Empty()):
                    print('Failed to upload video frame to GPU')
                    break
                cvtSurface = self.nvCvt.Execute(rawSurface)
                if (cvtSurface.Empty()):
                    print('Failed to do color conversion')
                    break
                encFrame = np.ndarray(shape=(0), dtype=np.uint8)
                success = self.nvEnc.EncodeSingleSurface(cvtSurface, encFrame)
                if(success):
                    bits = bytearray(encFrame)
                    self.encFile.write(bits)
            # Encoder is asynchronous, so we need to flush it.
            encFrame = np.ndarray(shape=(0), dtype=np.uint8)
            success = self.nvEnc.Flush(encFrame)
            if(success):
                bits = bytearray(encFrame)
                self.encFile.write(bits)
        except Exception as e:
            print(getattr(e, 'message', str(e)))
        finally:
            # Bug fix: the original except-handler called ``decFile.close()``,
            # but no ``decFile`` exists in this class (NameError that masked
            # the real exception). Close the files this worker actually
            # opened, on both success and failure paths.
            self.encFile.close()
            self.rawFile.close()
def create_threads(gpu_id1, width_1, height_1, input_file1, output_file1,
                   gpu_id2, width_2, height_2, input_file2, output_file2):
    """Run two encoding jobs in parallel and block until both complete."""
    workers = [
        Worker(gpu_id1, width_1, height_1, input_file1, output_file1),
        Worker(gpu_id2, width_2, height_2, input_file2, output_file2),
    ]
    # Start both workers first so they actually run concurrently,
    # then wait for each of them to finish.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    print("This sample encodes 2 videos simultaneously from YUV files into 1/4 of initial size.")
    print("Usage: SampleDecode.py $gpu_id1 $width_1 $height_1 $input_file1 $output_file_1 $gpu_id2 $width_2 $height_2 $input_file2 $output_file2")
    # Ten positional arguments are required (two groups of five).
    if(len(sys.argv) < 11):
        print("Provide input CLI arguments as shown above")
        exit(1)
    # First encoding job: GPU ordinal, frame size, input YUV, output stream.
    # Width/height are intentionally kept as strings; Worker builds the
    # "WxH" resolution string and converts to int where needed.
    gpu_1 = int(sys.argv[1])
    width_1 = sys.argv[2]
    height_1 = sys.argv[3]
    input_1 = sys.argv[4]
    output_1 = sys.argv[5]
    # Second encoding job.
    gpu_2 = int(sys.argv[6])
    width_2 = sys.argv[7]
    height_2 = sys.argv[8]
    input_2 = sys.argv[9]
    output_2 = sys.argv[10]
    create_threads(gpu_1, width_1, height_1, input_1, output_1, gpu_2, width_2, height_2, input_2, output_2)
| []
| []
| [
"CUDA_PATH",
"PATH"
]
| [] | ["CUDA_PATH", "PATH"] | python | 2 | 0 | |
file_info_query_test.go | package hedera
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
// TestNewFileInfoQuery checks that a FileInfoQuery built with a file ID and a
// mock payment transaction serializes to the expected protobuf text.
// The expected value is a golden string and must remain byte-identical.
func TestNewFileInfoQuery(t *testing.T) {
	mockTransaction, err := newMockTransaction()
	assert.NoError(t, err)
	query := NewFileInfoQuery().
		SetFileID(FileID{File: 3}).
		SetQueryPaymentTransaction(mockTransaction)
	assert.Equal(t, `fileGetInfo:<header:<payment:<bodyBytes:"\n\016\n\010\010\334\311\007\020\333\237\t\022\002\030\003\022\002\030\003\030\200\302\327/\"\002\010xr\024\n\022\n\007\n\002\030\002\020\307\001\n\007\n\002\030\003\020\310\001" sigMap:<sigPair:<pubKeyPrefix:"\344\361\300\353L}\315\303\347\353\021p\263\010\212=\022\242\227\364\243\353\342\362\205\003\375g5F\355\216" ed25519:"\022&5\226\373\264\034]P\273%\354P\233k\315\231\013\337\274\254)\246+\322<\227+\273\214\212f\313\332i\027T4{\367\363UYn\n\217\253ep\004\366\203\017\272FUP\243\321/\035\235\032\013" > > > > fileID:<fileNum:3 > > `, query.QueryBuilder.pb.String())
}
// TestFileInfoQueryTransaction_Execute is an end-to-end test against the
// Hedera testnet: create a file, query its info, verify the file ID
// round-trips, then delete the file again.
// Requires the OPERATOR_ID and OPERATOR_KEY environment variables to contain
// valid testnet operator credentials.
func TestFileInfoQueryTransaction_Execute(t *testing.T) {
	operatorAccountID, err := AccountIDFromString(os.Getenv("OPERATOR_ID"))
	assert.NoError(t, err)
	operatorPrivateKey, err := Ed25519PrivateKeyFromString(os.Getenv("OPERATOR_KEY"))
	assert.NoError(t, err)
	client := ClientForTestnet().
		SetOperator(operatorAccountID, operatorPrivateKey).
		SetMaxTransactionFee(NewHbar(2))
	// Create a small file owned by the operator key.
	txID, err := NewFileCreateTransaction().
		AddKey(operatorPrivateKey.PublicKey()).
		SetContents([]byte("Hello, World")).
		SetTransactionMemo("go sdk e2e tests").
		Execute(client)
	assert.NoError(t, err)
	receipt, err := txID.GetReceipt(client)
	assert.NoError(t, err)
	fileID := receipt.fileID
	assert.NotNil(t, fileID)
	// Query the info for the freshly created file and check the ID.
	info, err := NewFileInfoQuery().
		SetFileID(*fileID).
		Execute(client)
	assert.NoError(t, err)
	assert.Equal(t, *fileID, info.FileID)
	// Clean up: delete the file and wait for the receipt.
	txID, err = NewFileDeleteTransaction().
		SetFileID(*fileID).
		Execute(client)
	assert.NoError(t, err)
	_, err = txID.GetReceipt(client)
	assert.NoError(t, err)
}
| [
"\"OPERATOR_ID\"",
"\"OPERATOR_KEY\""
]
| []
| [
"OPERATOR_ID",
"OPERATOR_KEY"
]
| [] | ["OPERATOR_ID", "OPERATOR_KEY"] | go | 2 | 0 | |
IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/web/twcgi.py | # -*- test-case-name: twisted.web.test.test_cgi -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I hold resource classes and helper classes that deal with CGI scripts.
"""
# System Imports
import os
import urllib
# Twisted Imports
from twisted.web import http
from twisted.internet import protocol
from twisted.spread import pb
from twisted.python import log, filepath
from twisted.web import resource, server, static
class CGIDirectory(resource.Resource, filepath.FilePath):
    """
    A directory resource whose children are served as CGI scripts.

    Subdirectories are wrapped in further L{CGIDirectory} instances and
    regular files in L{CGIScript}; the directory itself cannot be rendered.
    """
    def __init__(self, pathname):
        """
        @param pathname: filesystem path of the directory to expose.
        """
        resource.Resource.__init__(self)
        filepath.FilePath.__init__(self, pathname)

    def getChild(self, path, request):
        """
        Look up C{path} inside this directory.

        Missing entries yield the standard not-found resource, directories
        recurse into a new L{CGIDirectory}, and anything else is treated as
        an executable CGI script. (A trailing, unreachable
        C{return resource.NoResource()} was removed: every branch above
        already returns.)
        """
        fnp = self.child(path)
        if not fnp.exists():
            return static.File.childNotFound
        elif fnp.isdir():
            return CGIDirectory(fnp.path)
        else:
            return CGIScript(fnp.path)

    def render(self, request):
        """
        Directory listing is not supported; always render an error page.
        """
        notFound = resource.NoResource(
            "CGI directories do not support directory listing.")
        return notFound.render(request)
class CGIScript(resource.Resource):
    """
    L{CGIScript} is a resource which runs child processes according to the CGI
    specification.
    The implementation is complex due to the fact that it requires asynchronous
    IPC with an external process with an unpleasant protocol.
    """
    isLeaf = 1
    def __init__(self, filename, registry=None, reactor=None):
        """
        Initialize, with the name of a CGI script file.

        @param filename: absolute path of the script to execute.
        @param registry: accepted for interface compatibility; not used by
            this class.
        @param reactor: reactor used to spawn the child process; defaults to
            the global reactor.
        """
        self.filename = filename
        if reactor is None:
            # This installs a default reactor, if None was installed before.
            # We do a late import here, so that importing the current module
            # won't directly trigger installing a default reactor.
            from twisted.internet import reactor
        self._reactor = reactor
    def render(self, request):
        """
        Do various things to conform to the CGI specification.
        I will set up the usual slew of environment variables, then spin off a
        process.
        @type request: L{twisted.web.http.Request}
        @param request: An HTTP request.
        """
        # Mandatory CGI meta-variables derived from the request.
        script_name = "/" + "/".join(request.prepath)
        serverName = request.getRequestHostname().split(':')[0]
        env = {"SERVER_SOFTWARE": server.version,
               "SERVER_NAME": serverName,
               "GATEWAY_INTERFACE": "CGI/1.1",
               "SERVER_PROTOCOL": request.clientproto,
               "SERVER_PORT": str(request.getHost().port),
               "REQUEST_METHOD": request.method,
               "SCRIPT_NAME": script_name, # XXX
               "SCRIPT_FILENAME": self.filename,
               "REQUEST_URI": request.uri,
               }
        # Client identity, when available.
        client = request.getClient()
        if client is not None:
            env['REMOTE_HOST'] = client
        ip = request.getClientIP()
        if ip is not None:
            env['REMOTE_ADDR'] = ip
        # Extra path components after the script name become PATH_INFO.
        pp = request.postpath
        if pp:
            env["PATH_INFO"] = "/" + "/".join(pp)
        if hasattr(request, "content"):
            # request.content is either a StringIO or a TemporaryFile, and
            # the file pointer is sitting at the beginning (seek(0,0))
            request.content.seek(0,2)
            length = request.content.tell()
            request.content.seek(0,0)
            env['CONTENT_LENGTH'] = str(length)
        # Split off the query string. Per CGI convention, a query that does
        # not contain '=' is an "indexed" query whose '+'-separated words are
        # passed to the script as command-line arguments; otherwise qargs
        # stays empty and the script reads QUERY_STRING itself.
        try:
            qindex = request.uri.index('?')
        except ValueError:
            env['QUERY_STRING'] = ''
            qargs = []
        else:
            qs = env['QUERY_STRING'] = request.uri[qindex+1:]
            if '=' in qs:
                qargs = []
            else:
                qargs = [urllib.unquote(x) for x in qs.split('+')]
        # Propogate HTTP headers: each request header becomes an environment
        # variable, prefixed with HTTP_ except for content-type/length.
        for title, header in request.getAllHeaders().items():
            envname = title.replace('-', '_').upper()
            if title not in ('content-type', 'content-length'):
                envname = "HTTP_" + envname
            env[envname] = header
        # Propogate our environment (without overriding CGI variables).
        for key, value in os.environ.items():
            if key not in env:
                env[key] = value
        # And they're off!
        self.runProcess(env, request, qargs)
        return server.NOT_DONE_YET
    def runProcess(self, env, request, qargs=[]):
        """
        Run the cgi script.
        @type env: A C{dict} of C{str}, or C{None}
        @param env: The environment variables to pass to the processs that will
            get spawned. See
            L{twisted.internet.interfaces.IReactorProcess.spawnProcess} for more
            information about environments and process creation.
        @type request: L{twisted.web.http.Request}
        @param request: An HTTP request.
        @type qargs: A C{list} of C{str}
        @param qargs: The command line arguments to pass to the process that
            will get spawned.
        """
        # The script runs with its own directory as the working directory.
        p = CGIProcessProtocol(request)
        self._reactor.spawnProcess(p, self.filename, [self.filename] + qargs,
                                   env, os.path.dirname(self.filename))
class FilteredScript(CGIScript):
    """
    I am a special version of a CGI script, that uses a specific executable.
    This is useful for interfacing with other scripting languages that adhere to
    the CGI standard. My C{filter} attribute specifies what executable to run,
    and my C{filename} init parameter describes which script to pass to the
    first argument of that executable.
    To customize me for a particular location of a CGI interpreter, override
    C{filter}.
    @type filter: C{str}
    @ivar filter: The absolute path to the executable.
    """
    filter = '/usr/bin/cat'
    def runProcess(self, env, request, qargs=[]):
        """
        Run a script through the C{filter} executable.
        @type env: A C{dict} of C{str}, or C{None}
        @param env: The environment variables to pass to the processs that will
            get spawned. See
            L{twisted.internet.interfaces.IReactorProcess.spawnProcess} for more
            information about environments and process creation.
        @type request: L{twisted.web.http.Request}
        @param request: An HTTP request.
        @type qargs: A C{list} of C{str}
        @param qargs: The command line arguments to pass to the process that
            will get spawned.
        """
        # Unlike CGIScript, the interpreter (self.filter) is executed and the
        # script path is handed to it as its first argument.
        p = CGIProcessProtocol(request)
        self._reactor.spawnProcess(p, self.filter,
                                   [self.filter, self.filename] + qargs, env,
                                   os.path.dirname(self.filename))
class CGIProcessProtocol(protocol.ProcessProtocol, pb.Viewable):
    """
    Bridge between a CGI child process and the HTTP request being served.

    The request body is written to the child's stdin; the child's stdout is
    parsed for the CGI header block and then streamed to the client; stderr
    output is collected for logging when the process ends.
    """
    handling_headers = 1   # true while still parsing the CGI header block
    headers_written = 0    # NOTE(review): not referenced within this class
    headertext = ''        # accumulated (possibly partial) header text
    errortext = ''         # accumulated stderr output
    # Remotely relay producer interface.
    def view_resumeProducing(self, issuer):
        self.resumeProducing()
    def view_pauseProducing(self, issuer):
        self.pauseProducing()
    def view_stopProducing(self, issuer):
        self.stopProducing()
    def resumeProducing(self):
        self.transport.resumeProducing()
    def pauseProducing(self):
        self.transport.pauseProducing()
    def stopProducing(self):
        self.transport.loseConnection()
    def __init__(self, request):
        self.request = request
    def connectionMade(self):
        # Register as the request's (push) producer and feed the request body
        # to the child's stdin, then close stdin so the script sees EOF.
        self.request.registerProducer(self, 1)
        self.request.content.seek(0, 0)
        content = self.request.content.read()
        if content:
            self.transport.write(content)
        self.transport.closeStdin()
    def errReceived(self, error):
        self.errortext = self.errortext + error
    def outReceived(self, output):
        """
        Handle a chunk of input
        """
        # First, make sure that the headers from the script are sorted
        # out (we'll want to do some parsing on these later.)
        if self.handling_headers:
            text = self.headertext + output
            headerEnds = []
            # Accept several line-ending conventions for the blank line that
            # terminates the header block; pick the earliest match.
            for delimiter in '\n\n','\r\n\r\n','\r\r', '\n\r\n':
                headerend = text.find(delimiter)
                if headerend != -1:
                    headerEnds.append((headerend, delimiter))
            if headerEnds:
                # The script is entirely in control of response headers; disable the
                # default Content-Type value normally provided by
                # twisted.web.server.Request.
                self.request.defaultContentType = None
                headerEnds.sort()
                headerend, delimiter = headerEnds[0]
                self.headertext = text[:headerend]
                # This is a final version of the header text.
                # Half of the (doubled) delimiter is a single line break.
                linebreak = delimiter[:len(delimiter)//2]
                headers = self.headertext.split(linebreak)
                for header in headers:
                    br = header.find(': ')
                    if br == -1:
                        log.msg(
                            format='ignoring malformed CGI header: %(header)r',
                            header=header)
                    else:
                        headerName = header[:br].lower()
                        headerText = header[br+2:]
                        if headerName == 'location':
                            self.request.setResponseCode(http.FOUND)
                        if headerName == 'status':
                            try:
                                statusNum = int(headerText[:3]) #"XXX <description>" sometimes happens.
                            except:
                                log.msg( "malformed status header" )
                            else:
                                self.request.setResponseCode(statusNum)
                        else:
                            # Don't allow the application to control these required headers.
                            if headerName.lower() not in ('server', 'date'):
                                self.request.responseHeaders.addRawHeader(headerName, headerText)
                # Anything after the delimiter is response body.
                output = text[headerend+len(delimiter):]
                self.handling_headers = 0
            if self.handling_headers:
                self.headertext = text
        if not self.handling_headers:
            self.request.write(output)
    def processEnded(self, reason):
        # Log abnormal exits and any stderr chatter; if the script died before
        # finishing its header block, serve a 500 error page instead.
        if reason.value.exitCode != 0:
            log.msg("CGI %s exited with exit code %s" %
                    (self.request.uri, reason.value.exitCode))
        if self.errortext:
            log.msg("Errors from CGI %s: %s" % (self.request.uri, self.errortext))
        if self.handling_headers:
            log.msg("Premature end of headers in %s: %s" % (self.request.uri, self.headertext))
            self.request.write(
                resource.ErrorPage(http.INTERNAL_SERVER_ERROR,
                                   "CGI Script Error",
                                   "Premature end of script headers.").render(self.request))
        self.request.unregisterProducer()
        self.request.finish()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
chatbot.py | from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import os
import pickle
import copy
import sys
import html
from model import Model
def main():
    """Entry point: load the saved chatbot model with default settings.

    Returns:
        (states, sess, net, chars, vocab, params) — everything a caller
        needs to drive chatbot(), plus the parameter dict that was used.
    """
    assert sys.version_info >= (3, 3), \
        "Must be run in Python 3.3 or later. You are running {}".format(sys.version)
    # Default sampling configuration. (The unused argparse.ArgumentParser
    # instance and the dead, commented-out argument definitions that
    # previously duplicated these values have been removed.)
    params = {'save_dir': 'models/reddit',  # checkpointed model directory
              'n': 500,                     # max characters to sample per reply
              'beam_width': 2,              # beam search width
              'temperature': 1.0,           # sampling temperature (1.0 = neutral)
              'topn': -1,                   # top-n filtering (<0 disables)
              'relevance': -1}              # relevance masking (<0 disables)
    states, sess, net, chars, vocab = sample_main(**params)
    return states, sess, net, chars, vocab, params
def get_paths(input_path):
    """Resolve a model file or checkpoint directory into the paths needed to load it.

    Returns (model_path, config_path, vocab_path); config.pkl and
    chars_vocab.pkl always live next to the model checkpoint.
    """
    if os.path.isfile(input_path):
        # A concrete model file was supplied; its directory is the save dir.
        model_path = input_path
        save_dir = os.path.dirname(model_path)
    elif os.path.exists(input_path):
        # A checkpoint directory was supplied; locate the latest checkpoint.
        save_dir = input_path
        checkpoint = tf.train.get_checkpoint_state(save_dir)
        if not checkpoint:
            raise ValueError('Checkpoint not found in {}.'.format(save_dir))
        model_path = checkpoint.model_checkpoint_path
    else:
        raise ValueError('save_dir is not a valid path.')
    config_path = os.path.join(save_dir, 'config.pkl')
    vocab_path = os.path.join(save_dir, 'chars_vocab.pkl')
    return model_path, config_path, vocab_path
def sample_main(save_dir, n, beam_width, temperature, topn, relevance):
    # Build the inference-mode network and restore its trained weights.
    # Returns (states, sess, net, chars, vocab) ready for chatbot().
    # Note: ``n``, ``temperature`` and ``topn`` are accepted but not used
    # here; they are applied later during sampling.
    model_path, config_path, vocab_path = get_paths(save_dir)
    # Arguments passed to sample.py direct us to a saved model.
    # Load the separate arguments by which that model was previously trained.
    # That's saved_args. Use those to load the model.
    with open(config_path, 'rb') as f:
        saved_args = pickle.load(f)
    # Separately load chars and vocab from the save directory.
    with open(vocab_path, 'rb') as f:
        chars, vocab = pickle.load(f)
    # Create the model from the saved arguments, in inference mode.
    print("Creating model...")
    # The batch dimension doubles as the beam width during inference.
    saved_args.batch_size = beam_width
    net = Model(saved_args, True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Make tensorflow less verbose; filter out info (1+) and warnings (2+) but not errors (3).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    sess = tf.Session(config=config)
    tf.global_variables_initializer().run(session=sess)
    saver = tf.train.Saver(net.save_variables_list())
    # Restore the saved variables, replacing the initialized values.
    print("Restoring weights...")
    saver.restore(sess, model_path)
    # Fresh hidden state(s); a [primary, mask] pair when relevance > 0.
    states = initial_state_with_relevance_masking(net, sess, relevance)
    return states, sess, net, chars, vocab
def output(inp, states, net, sess, chars, vocab, save_dir, n, beam_width, temperature, topn, relevance):
    # Thin adapter: forwards to chatbot() in the argument order it expects
    # (``n`` becomes max_length). ``save_dir`` is accepted but unused here,
    # which lets callers splat the params dict produced by main().
    return chatbot(inp, states, net, sess, chars, vocab, n, beam_width,relevance, temperature, topn)
def initial_state(net, sess):
    """Return a freshly initialized hidden state by evaluating the net's zero state."""
    return sess.run(net.zero_state)
def forward_text(net, sess, states, relevance, vocab, prime_text=None):
    """Prime the network with ``prime_text``, one character at a time.

    With relevance masking enabled (relevance > 0), ``states`` is a
    [primary_state, mask_state] pair: the primary net always advances, while
    the mask net is reset at every newline and advanced otherwise. Without
    masking, ``states`` is a single state that simply advances.

    Returns the updated state(s).
    """
    if prime_text is not None:
        for char in prime_text:
            if relevance > 0.:
                # Automatically forward the primary net.
                _, states[0] = net.forward_model(sess, states[0], vocab[char])
                # If the character is a newline, reset the mask net state;
                # else, forward it. (Bug fix: the original compared the token
                # id ``vocab[char]`` against the character '\n', so the reset
                # never fired; compare the character itself, consistent with
                # how mask_reset_token is handled in forward_with_mask.)
                if char == '\n':
                    states[1] = initial_state(net, sess)
                else:
                    _, states[1] = net.forward_model(sess, states[1], vocab[char])
            else:
                _, states = net.forward_model(sess, states, vocab[char])
    return states
def sanitize_text(vocab, text):
    """Strip out characters that are not part of the net's vocabulary."""
    kept = [ch for ch in text if ch in vocab]
    return ''.join(kept)
def initial_state_with_relevance_masking(net, sess, relevance):
    """Return a fresh state, or a [primary, mask] pair when relevance masking is on."""
    if relevance > 0.:
        # Masking needs two independent states: primary net and mask net.
        return [initial_state(net, sess), initial_state(net, sess)]
    return initial_state(net, sess)
def possibly_escaped_char(raw_chars):
    """Emit the newest character, decoding a just-completed HTML entity.

    When the newest character is ';' and a matching '&' opener is found
    within the previous few characters, the already-emitted entity text is
    erased with backspaces and replaced by its decoded form (padded with
    spaces and backspaces to overwrite the leftovers). Otherwise the newest
    character is returned unchanged.
    """
    newest = raw_chars[-1]
    if newest != ';':
        return newest
    # Walk backwards from just before the ';' looking for the '&' opener.
    for i, c in enumerate(reversed(raw_chars[:-1])):
        if c == ';' or i > 8:
            # Another ';' before any '&', or the candidate entity is too
            # long: this ';' does not close an escape sequence.
            return newest
        if c == '&':
            escape_seq = "".join(raw_chars[-(i + 2):])
            decoded = html.unescape(escape_seq)
            erase = '\b' * (len(escape_seq) - 1)
            pad = len(escape_seq) - len(decoded) - 1
            return erase + decoded + ' ' * pad + '\b' * pad
    return newest
def chatbot(user_input, states, net, sess, chars, vocab, max_length, beam_width, relevance, temperature, topn):
    # Produce one bot reply for ``user_input`` (mutating the model states as a
    # side effect) and return the reply string. '--' commands are handled by
    # process_user_command and produce an empty reply.
    reply=''
    user_command_entered, reset, states, relevance, temperature, topn, beam_width = process_user_command(
        user_input, states, relevance, temperature, topn, beam_width)
    if reset: states = initial_state_with_relevance_masking(net, sess, relevance)
    if not user_command_entered:
        # Prime the model with the user's line, wrapped in the prompt markers
        # the model was trained on.
        states = forward_text(net, sess, states, relevance, vocab, sanitize_text(vocab, "> " + user_input + "\n>"))
        # Beam-search the response, starting from a space; generation stops
        # at newline, and '>' is forbidden so the bot can't end the chat.
        computer_response_generator = beam_search_generator(sess=sess, net=net,
            initial_state=copy.deepcopy(states), initial_sample=vocab[' '],
            early_term_token=vocab['\n'], beam_width=beam_width, forward_model_fn=forward_with_mask,
            forward_args={'relevance':relevance, 'mask_reset_token':vocab['\n'], 'forbidden_token':vocab['>'],
            'temperature':temperature, 'topn':topn})
        out_chars = []
        for i, char_token in enumerate(computer_response_generator):
            # Accumulate the reply (HTML entities are decoded on the fly)
            # and advance the live states with each emitted character.
            out_chars.append(chars[char_token])
            # print(possibly_escaped_char(out_chars), end='', flush=True)
            reply += possibly_escaped_char(out_chars)
            states = forward_text(net, sess, states, relevance, vocab, chars[char_token])
            if i >= max_length: break
        #reply += " "
        # Prime the model with the next prompt so it is ready for more input.
        states = forward_text(net, sess, states, relevance, vocab, sanitize_text(vocab, "\n> "))
    return reply
def process_user_command(user_input, states, relevance, temperature, topn, beam_width):
    """Interpret an in-chat '--' command and update the sampling settings.

    Returns the 7-tuple (user_command_entered, reset, states, relevance,
    temperature, topn, beam_width) with any values the command changed.
    """
    command_entered = False
    reset_requested = False
    try:
        if user_input.startswith('--temperature '):
            command_entered = True
            # Clamp to a small positive value; zero would break sampling.
            temperature = max(0.001, float(user_input[len('--temperature '):]))
            print("[Temperature set to {}]".format(temperature))
        elif user_input.startswith('--relevance '):
            command_entered = True
            updated_relevance = float(user_input[len('--relevance '):])
            # Turning masking on requires a second (mask-net) copy of the
            # state; turning it off keeps only the primary state.
            if relevance <= 0. and updated_relevance > 0.:
                states = [states, copy.deepcopy(states)]
            elif relevance > 0. and updated_relevance <= 0.:
                states = states[0]
            relevance = updated_relevance
            if relevance <= 0.:
                print("[Relevance disabled]")
            else:
                print("[Relevance set to {}]".format(relevance))
        elif user_input.startswith('--topn '):
            command_entered = True
            topn = int(user_input[len('--topn '):])
            if topn <= 0:
                print("[Top-n filtering disabled]")
            else:
                print("[Top-n filtering set to {}]".format(topn))
        elif user_input.startswith('--beam_width '):
            command_entered = True
            beam_width = max(1, int(user_input[len('--beam_width '):]))
            print("[Beam width set to {}]".format(beam_width))
        elif user_input.startswith('--reset'):
            command_entered = True
            reset_requested = True
            print("[Model state reset]")
    except ValueError:
        print("[Value error with provided argument.]")
    return command_entered, reset_requested, states, relevance, temperature, topn, beam_width
def consensus_length(beam_outputs, early_term_token):
    """Determine how many leading tokens all beams agree on.

    Args:
        beam_outputs: list of per-beam token sequences (equal lengths).
        early_term_token: token that terminates generation once agreed upon.

    Returns:
        (l, early_term): ``l`` tokens may be emitted now; ``early_term`` is
        True when the consensus includes the early-termination token.
    """
    # Robustness fix: with no generated tokens yet there is nothing to agree
    # on. (The original read the loop variable after a zero-iteration loop,
    # raising NameError on an empty sequence.)
    if not beam_outputs[0]:
        return 0, False
    for l in range(len(beam_outputs[0])):
        # Consensus already covers the early-termination token: stop there.
        if l > 0 and beam_outputs[0][l-1] == early_term_token:
            return l-1, True
        # Any disagreement at position l limits consensus to l tokens.
        for b in beam_outputs[1:]:
            if beam_outputs[0][l] != b[l]:
                return l, False
    # Full agreement: report up to the last index, keeping the final token
    # pending (mirrors the original behavior).
    return l, False
def scale_prediction(prediction, temperature):
    """Re-shape a probability distribution with a softmax temperature.

    Lower temperatures sharpen the distribution (more conservative);
    temperature 1.0 returns the input unchanged.
    """
    if (temperature == 1.0): return prediction # Temperature 1.0 makes no change
    # Work in log space for numerical stability; zero probabilities produce
    # -inf, so divide-by-zero warnings are suppressed around the log call.
    np.seterr(divide='ignore')
    logits = np.log(prediction) / temperature
    logits = logits - np.logaddexp.reduce(logits)  # log-normalize
    rescaled = np.exp(logits)
    np.seterr(divide='warn')
    return rescaled
def forward_with_mask(sess, net, states, input_sample, forward_args):
    # Advance the model one token and return a filtered probability
    # distribution over the next token, applying (in order): relevance
    # masking, forbidden-token suppression, temperature, and top-n filtering.
    # forward_args is a dictionary containing arguments for generating probabilities.
    relevance = forward_args['relevance']
    mask_reset_token = forward_args['mask_reset_token']
    forbidden_token = forward_args['forbidden_token']
    temperature = forward_args['temperature']
    topn = forward_args['topn']
    if relevance <= 0.:
        # No relevance masking.
        prob, states = net.forward_model(sess, states, input_sample)
    else:
        # states should be a 2-length list: [primary net state, mask net state].
        if input_sample == mask_reset_token:
            # Reset the mask probs when reaching mask_reset_token (newline).
            states[1] = initial_state(net, sess)
        primary_prob, states[0] = net.forward_model(sess, states[0], input_sample)
        primary_prob /= sum(primary_prob)
        mask_prob, states[1] = net.forward_model(sess, states[1], input_sample)
        mask_prob /= sum(mask_prob)
        # MMI-style combination: boost tokens the primary net likes more
        # than the (context-free) mask net does.
        prob = np.exp(np.log(primary_prob) - relevance * np.log(mask_prob))
    # Mask out the forbidden token (">") to prevent the bot from deciding the chat is over)
    prob[forbidden_token] = 0
    # Normalize probabilities so they sum to 1.
    prob = prob / sum(prob)
    # Apply temperature.
    prob = scale_prediction(prob, temperature)
    # Apply top-n filtering if enabled
    if topn > 0:
        # Zero out everything but the n most likely tokens, then renormalize.
        prob[np.argsort(prob)[:-topn]] = 0
        prob = prob / sum(prob)
    return prob, states
def beam_search_generator(sess, net, initial_state, initial_sample,
    early_term_token, beam_width, forward_model_fn, forward_args):
    '''Run beam search! Yield consensus tokens sequentially, as a generator;
    return when reaching early_term_token (newline).

    Args:
        sess: tensorflow session reference
        net: tensorflow net graph (must be compatible with the forward_net function)
        initial_state: initial hidden state of the net
        initial_sample: single token (excluding any seed/priming material)
            to start the generation
        early_term_token: stop when the beam reaches consensus on this token
            (but do not return this token).
        beam_width: how many beams to track
        forward_model_fn: function to forward the model, must be of the form:
            probability_output, beam_state =
                    forward_model_fn(sess, net, beam_state, beam_sample, forward_args)
            (Note: probability_output has to be a valid probability distribution!)
        tot_steps: how many tokens to generate before stopping,
            unless already stopped via early_term_token.
    Returns: a generator to yield a sequence of beam-sampled tokens.'''
    # Store state, outputs and probabilities for up to args.beam_width beams.
    # Initialize with just the one starting entry; it will branch to fill the beam
    # in the first step.
    beam_states = [initial_state] # Stores the best activation states
    beam_outputs = [[initial_sample]] # Stores the best generated output sequences so far.
    beam_probs = [1.] # Stores the cumulative normalized probabilities of the beams so far.
    while True:
        # Keep a running list of the best beam branches for next step.
        # Don't actually copy any big data structures yet, just keep references
        # to existing beam state entries, and then clone them as necessary
        # at the end of the generation step.
        new_beam_indices = []
        new_beam_probs = []
        new_beam_samples = []
        # Iterate through the beam entries.
        for beam_index, beam_state in enumerate(beam_states):
            beam_prob = beam_probs[beam_index]
            beam_sample = beam_outputs[beam_index][-1]
            # Forward the model.
            prediction, beam_states[beam_index] = forward_model_fn(
                sess, net, beam_state, beam_sample, forward_args)
            # Sample best_tokens from the probability distribution.
            # Sample from the scaled probability distribution beam_width choices
            # (but not more than the number of positive probabilities in scaled_prediction).
            count = min(beam_width, sum(1 if p > 0. else 0 for p in prediction))
            best_tokens = np.random.choice(len(prediction), size=count,
                                            replace=False, p=prediction)
            for token in best_tokens:
                prob = prediction[token] * beam_prob
                if len(new_beam_indices) < beam_width:
                    # If we don't have enough new_beam_indices, we automatically qualify.
                    new_beam_indices.append(beam_index)
                    new_beam_probs.append(prob)
                    new_beam_samples.append(token)
                else:
                    # Sample a low-probability beam to possibly replace.
                    # (Inverting the probabilities makes weaker beams more
                    # likely to be chosen for replacement.)
                    np_new_beam_probs = np.array(new_beam_probs)
                    inverse_probs = -np_new_beam_probs + max(np_new_beam_probs) + min(np_new_beam_probs)
                    inverse_probs = inverse_probs / sum(inverse_probs)
                    sampled_beam_index = np.random.choice(beam_width, p=inverse_probs)
                    if new_beam_probs[sampled_beam_index] <= prob:
                        # Replace it.
                        new_beam_indices[sampled_beam_index] = beam_index
                        new_beam_probs[sampled_beam_index] = prob
                        new_beam_samples[sampled_beam_index] = token
        # Replace the old states with the new states, first by referencing and then by copying.
        # (A parent state is deep-copied only when a second child needs it.)
        already_referenced = [False] * beam_width
        new_beam_states = []
        new_beam_outputs = []
        for i, new_index in enumerate(new_beam_indices):
            if already_referenced[new_index]:
                new_beam = copy.deepcopy(beam_states[new_index])
            else:
                new_beam = beam_states[new_index]
                already_referenced[new_index] = True
            new_beam_states.append(new_beam)
            new_beam_outputs.append(beam_outputs[new_index] + [new_beam_samples[i]])
        # Normalize the beam probabilities so they don't drop to zero
        beam_probs = new_beam_probs / sum(new_beam_probs)
        beam_states = new_beam_states
        beam_outputs = new_beam_outputs
        # Prune the agreed portions of the outputs
        # and yield the tokens on which the beam has reached consensus.
        l, early_term = consensus_length(beam_outputs, early_term_token)
        if l > 0:
            for token in beam_outputs[0][:l]: yield token
            beam_outputs = [output[l:] for output in beam_outputs]
        if early_term: return
if __name__ == '__main__':
    # Run as a script: load the model once, then clear the default graph so
    # the process can be reused (e.g. in an interactive session).
    states, sess, net, chars, vocab, args = main()
    tf.reset_default_graph()
if __name__ == 'chatbot':
    # Imported as a module named 'chatbot': reset any pre-existing default
    # graph first, then load the model so importers get ready-to-use objects.
    tf.reset_default_graph()
    states, sess, net, chars, vocab, params = main()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
cmd/gateway/gcs/gateway-gcs.go | /*
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gcs
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
"path"
"strconv"
"os"
"regexp"
"strings"
"time"
"cloud.google.com/go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/policy/condition"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
minio "github.com/minio/minio/cmd"
)
var (
	// errGCSInvalidProjectID is returned when the project ID is empty or
	// does not match the expected format.
	errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid")

	// errGCSProjectIDNotFound is returned when the given project ID does
	// not correspond to a known project.
	errGCSProjectIDNotFound = fmt.Errorf("Unknown project id")

	// errGCSFormat is returned when backend metadata is in an
	// unrecognized format.
	errGCSFormat = fmt.Errorf("Unknown format")
)
const (
	// Path where multipart objects are saved.
	// If we change the backend format we will use a different url path like /multipart/v2
	// but we will not migrate old data.
	gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1"

	// Multipart meta file.
	gcsMinioMultipartMeta = "gcs.json"

	// gcs.json version number.
	gcsMinioMultipartMetaCurrentVersion = "1"

	// Token prefixed with GCS returned marker to differentiate
	// from a user-supplied marker.
	gcsTokenPrefix = "{minio}"

	// Maximum component object count to create a composite object.
	// Refer https://cloud.google.com/storage/docs/composite-objects
	gcsMaxComponents = 32

	// Every 24 hours we scan minio.sys.tmp to delete expired multiparts in minio.sys.tmp
	gcsCleanupInterval = time.Hour * 24

	// The cleanup routine deletes files older than 2 weeks in minio.sys.tmp
	gcsMultipartExpiry = time.Hour * 24 * 14

	// Project ID key in credentials.json.
	gcsProjectIDKey = "project_id"

	// Identifier of the GCS gateway backend.
	gcsBackend = "gcs"
)
// init registers the "gcs" gateway sub-command (its CLI help template and
// entry point) with the minio command line.
func init() {
	const gcsGatewayTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
PROJECTID:
GCS project-id should be provided if GOOGLE_APPLICATION_CREDENTIALS environmental variable is not set.
ENVIRONMENT VARIABLES:
ACCESS:
MINIO_ACCESS_KEY: Username or access key of GCS.
MINIO_SECRET_KEY: Password or secret key of GCS.
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests, set this value to Minio host domain name.
CACHE:
MINIO_CACHE_DRIVES: List of mounted drives or directories delimited by ";".
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";".
MINIO_CACHE_EXPIRY: Cache expiry duration in days.
MINIO_CACHE_MAXUSE: Maximum permitted usage of the cache in percentage (0-100).
GCS credentials file:
GOOGLE_APPLICATION_CREDENTIALS: Path to credentials.json
EXAMPLES:
1. Start minio gateway server for GCS backend.
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
(Instructions to generate credentials : https://developers.google.com/identity/protocols/application-default-credentials)
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}} mygcsprojectid
2. Start minio gateway server for GCS backend with edge caching enabled.
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ export MINIO_CACHE_DRIVES="/mnt/drive1;/mnt/drive2;/mnt/drive3;/mnt/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ export MINIO_CACHE_MAXUSE=80
$ {{.HelpName}} mygcsprojectid
`

	minio.RegisterGatewayCommand(cli.Command{
		Name:               gcsBackend,
		Usage:              "Google Cloud Storage.",
		Action:             gcsGatewayMain,
		CustomHelpTemplate: gcsGatewayTemplate,
		HideHelpCommand:    true,
	})
}
// Handler for 'minio gateway gcs' command line.
func gcsGatewayMain(ctx *cli.Context) {
	projectID := ctx.Args().First()

	// The project id must come either from the command line or implicitly
	// via the GOOGLE_APPLICATION_CREDENTIALS credentials file.
	if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
		logger.LogIf(context.Background(), errGCSProjectIDNotFound)
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}

	// When supplied explicitly, the project id must be well formed.
	if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
		reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", projectID)
		logCtx := logger.SetReqInfo(context.Background(), reqInfo)
		logger.LogIf(logCtx, errGCSInvalidProjectID)
		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
	}

	minio.StartGateway(ctx, &GCS{projectID})
}
// GCS is the Gateway implementation for Google Cloud Storage.
// (The previous comment claimed it "implements Azure" — a copy/paste slip.)
type GCS struct {
	projectID string // GCS project id, from the CLI argument or credentials.json
}
// Name returns the name of gcs ObjectLayer ("gcs").
func (g *GCS) Name() string {
	return gcsBackend
}
// NewGatewayLayer returns gcs ObjectLayer. The supplied credentials are
// not used for GCS authentication; the GCS SDK authenticates via
// GOOGLE_APPLICATION_CREDENTIALS / application default credentials.
func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
	ctx := context.Background()

	var err error
	if g.projectID == "" {
		// If project ID is not provided on command line, we figure it out
		// from the credentials.json file.
		g.projectID, err = gcsParseProjectID(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))
		if err != nil {
			return nil, err
		}
	}

	// Initialize a GCS client.
	// Send user-agent in this format for Google to obtain usage insights while participating in the
	// Google Cloud Technology Partners (https://cloud.google.com/partners/)
	client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("Minio/%s (GPN:Minio;)", minio.Version)))
	if err != nil {
		return nil, err
	}

	gcs := &gcsGateway{
		client:    client,
		projectID: g.projectID,
		ctx:       ctx,
	}

	// Start background process to cleanup old files in minio.sys.tmp
	// (runs for the lifetime of the process; see CleanupGCSMinioSysTmp).
	go gcs.CleanupGCSMinioSysTmp()
	return gcs, nil
}
// Production - GCS gateway is production ready.
func (g *GCS) Production() bool {
	return true
}
// gcsMultipartMetaV1 is stored in gcs.json for each ongoing multipart
// upload. Contents of this file is not used anywhere; it exists for
// debugging purposes (and its presence marks the upload id as valid).
type gcsMultipartMetaV1 struct {
	Version string `json:"version"` // Version number (gcsMinioMultipartMetaCurrentVersion)
	Bucket  string `json:"bucket"`  // Bucket name
	Object  string `json:"object"`  // Object name
}
// Returns name of the multipart meta object.
func gcsMultipartMetaName(uploadID string) string {
return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
}
// gcsMultipartDataName returns the object name of an individual part:
// minio.sys.tmp/multipart/v1/<uploadID>/<zero-padded part number>.<etag>
func gcsMultipartDataName(uploadID string, partNumber int, etag string) string {
	part := fmt.Sprintf("%05d.%s", partNumber, etag)
	return gcsMinioMultipartPathV1 + "/" + uploadID + "/" + part
}
// gcsToObjectError converts GCS client errors to minio object layer errors.
//
// params is interpreted positionally as (bucket, object, uploadID); any
// suffix may be omitted.
func gcsToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	bucket := ""
	object := ""
	uploadID := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	// BUGFIX: this previously tested `len(params) == 2`, which left
	// `object` empty whenever an uploadID was also supplied (three
	// params), producing ObjectNotFound errors with no object name.
	if len(params) >= 2 {
		object = params[1]
	}
	if len(params) >= 3 {
		uploadID = params[2]
	}

	// in some cases just a plain error is being returned
	switch err.Error() {
	case "storage: bucket doesn't exist":
		return minio.BucketNotFound{
			Bucket: bucket,
		}
	case "storage: object doesn't exist":
		if uploadID != "" {
			return minio.InvalidUploadID{
				UploadID: uploadID,
			}
		}
		return minio.ObjectNotFound{
			Bucket: bucket,
			Object: object,
		}
	}

	googleAPIErr, ok := err.(*googleapi.Error)
	if !ok {
		// We don't interpret non Minio errors. As minio errors will
		// have StatusCode to help to convert to object errors.
		return err
	}
	if len(googleAPIErr.Errors) == 0 {
		return err
	}

	reason := googleAPIErr.Errors[0].Reason
	message := googleAPIErr.Errors[0].Message

	switch reason {
	case "required", "keyInvalid", "forbidden":
		// e.g. anonymous users do not have storage.xyz access to the project.
		err = minio.PrefixAccessDenied{
			Bucket: bucket,
			Object: object,
		}
	case "invalid":
		err = minio.BucketNameInvalid{
			Bucket: bucket,
		}
	case "notFound":
		if object != "" {
			err = minio.ObjectNotFound{
				Bucket: bucket,
				Object: object,
			}
			break
		}
		err = minio.BucketNotFound{Bucket: bucket}
	case "conflict":
		// GCS distinguishes conflicts only by message text.
		if message == "You already own this bucket. Please select another name." {
			err = minio.BucketAlreadyOwnedByYou{Bucket: bucket}
			break
		}
		if message == "Sorry, that name is not available. Please try a different one." {
			err = minio.BucketAlreadyExists{Bucket: bucket}
			break
		}
		err = minio.BucketNotEmpty{Bucket: bucket}
	default:
		err = fmt.Errorf("Unsupported error reason: %s", reason)
	}

	return err
}
// gcsProjectIDRegex matches a well-formed GCS project id (raw string
// literal so no double escaping is needed).
var gcsProjectIDRegex = regexp.MustCompile(`^[a-z][a-z0-9-]{5,29}$`)

// isValidGCSProjectIDFormat reports whether projectID looks like a valid
// GCS project id: starts with a lowercase letter, contains only lowercase
// ASCII letters, digits or hyphens, and is 6 to 30 characters long.
// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section)
func isValidGCSProjectIDFormat(projectID string) bool {
	return gcsProjectIDRegex.MatchString(projectID)
}
// gcsGateway - Implements gateway for Minio and GCS compatible object storage servers.
type gcsGateway struct {
	minio.GatewayUnsupported
	client    *storage.Client // authenticated GCS client
	projectID string          // project id all bucket operations run under
	ctx       context.Context // background context used for GCS SDK calls
}
// gcsParseProjectID extracts the "project_id" value from the JSON
// credentials file pointed to by GOOGLE_APPLICATION_CREDENTIALS.
func gcsParseProjectID(credsFile string) (string, error) {
	contents, err := ioutil.ReadFile(credsFile)
	if err != nil {
		return "", err
	}
	creds := map[string]string{}
	if err = json.Unmarshal(contents, &creds); err != nil {
		return "", err
	}
	return creds[gcsProjectIDKey], nil
}
// CleanupGCSMinioSysTmpBucket deletes objects under minio.sys.tmp/ in the
// given bucket that are older than gcsMultipartExpiry (2 weeks).
func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(bucket string) {
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false})
	for {
		attrs, err := it.Next()
		if err != nil {
			// iterator.Done is the normal end of listing; anything else is logged.
			if err != iterator.Done {
				reqInfo := &logger.ReqInfo{BucketName: bucket}
				ctx := logger.SetReqInfo(context.Background(), reqInfo)
				logger.LogIf(ctx, err)
			}
			return
		}
		if time.Since(attrs.Updated) > gcsMultipartExpiry {
			// Delete files older than 2 weeks.
			err := l.client.Bucket(bucket).Object(attrs.Name).Delete(l.ctx)
			if err != nil {
				reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name}
				ctx := logger.SetReqInfo(context.Background(), reqInfo)
				logger.LogIf(ctx, err)
				// NOTE(review): one failed delete aborts this bucket's sweep;
				// remaining expired objects wait for the next interval.
				return
			}
		}
	}
}
// CleanupGCSMinioSysTmp periodically (every gcsCleanupInterval) sweeps
// minio.sys.tmp of every bucket in the project. Intended to run as a
// long-lived goroutine; it never returns.
func (l *gcsGateway) CleanupGCSMinioSysTmp() {
	for {
		it := l.client.Buckets(l.ctx, l.projectID)
		for {
			attrs, err := it.Next()
			if err != nil {
				// iterator.Done simply ends this pass over the buckets.
				if err != iterator.Done {
					ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
					logger.LogIf(ctx, err)
				}
				break
			}
			l.CleanupGCSMinioSysTmpBucket(attrs.Name)
		}
		// Run the cleanup loop every 1 day.
		time.Sleep(gcsCleanupInterval)
	}
}
// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart. The GCS gateway keeps no
// local state, so this is a no-op.
func (l *gcsGateway) Shutdown(ctx context.Context) error {
	return nil
}
// StorageInfo - Not relevant to GCS backend; returns the zero value.
func (l *gcsGateway) StorageInfo(ctx context.Context) minio.StorageInfo {
	return minio.StorageInfo{}
}
// MakeBucketWithLocation - Create a new container on GCS backend.
func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
	// "us-east-1" is the S3 default region; map it to the GCS "us"
	// multi-region.
	if location == "us-east-1" {
		location = "us"
	}
	err := l.client.Bucket(bucket).Create(l.ctx, l.projectID, &storage.BucketAttrs{
		Location: location,
	})
	logger.LogIf(ctx, err)
	return gcsToObjectError(err, bucket)
}
// GetBucketInfo fetches bucket metadata (name and creation time) from GCS.
func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
	attrs, err := l.client.Bucket(bucket).Attrs(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.BucketInfo{}, gcsToObjectError(err, bucket)
	}
	info := minio.BucketInfo{
		Name:    attrs.Name,
		Created: attrs.Created,
	}
	return info, nil
}
// ListBuckets lists all buckets under your project-id on GCS.
func (l *gcsGateway) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
	var buckets []minio.BucketInfo
	it := l.client.Buckets(l.ctx, l.projectID)
	// Drain the iterator, capturing every bucket.
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return buckets, nil
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return buckets, gcsToObjectError(err)
		}
		buckets = append(buckets, minio.BucketInfo{
			Name:    attrs.Name,
			Created: attrs.Created,
		})
	}
}
// DeleteBucket delete a bucket on GCS.
//
// S3 semantics require the bucket to be empty. The one exception is our
// own minio.sys.tmp/ housekeeping prefix: if that is the only content
// left, it is removed first and the bucket delete proceeds.
func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
	// Delimiter "/" gives only top-level objects and prefixes.
	itObject := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
		Delimiter: "/",
		Versions:  false,
	})
	// We list the bucket and if we find any objects we return BucketNotEmpty error. If we
	// find only "minio.sys.tmp/" then we remove it before deleting the bucket.
	gcsMinioPathFound := false
	nonGCSMinioPathFound := false
	for {
		objAttrs, err := itObject.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return gcsToObjectError(err)
		}
		if objAttrs.Prefix == minio.GatewayMinioSysTmp {
			gcsMinioPathFound = true
			continue
		}
		// Any other entry means the bucket is non-empty; stop scanning.
		nonGCSMinioPathFound = true
		break
	}
	if nonGCSMinioPathFound {
		logger.LogIf(ctx, minio.BucketNotEmpty{})
		return gcsToObjectError(minio.BucketNotEmpty{})
	}
	if gcsMinioPathFound {
		// Remove minio.sys.tmp before deleting the bucket.
		itObject = l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp})
		for {
			objAttrs, err := itObject.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				logger.LogIf(ctx, err)
				return gcsToObjectError(err)
			}
			err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(l.ctx)
			if err != nil {
				logger.LogIf(ctx, err)
				return gcsToObjectError(err)
			}
		}
	}
	err := l.client.Bucket(bucket).Delete(l.ctx)
	logger.LogIf(ctx, err)
	return gcsToObjectError(err, bucket)
}
// toGCSPageToken converts an object name into a GCS continuation token:
// the base64 encoding of a small protobuf-style message — tag byte 0xa,
// the name length as a varint, then the name bytes.
func toGCSPageToken(name string) string {
	length := uint16(len(name))
	b := []byte{0xa}
	// BUGFIX: encode the length as a proper varint. The previous code
	// emitted byte(length&0xFF) for the low bits, which only happened to
	// carry the continuation bit for lengths 128-255 and produced
	// malformed tokens for names of 256 bytes or more.
	for length >= 0x80 {
		b = append(b, byte(length)|0x80)
		length >>= 7
	}
	b = append(b, byte(length))
	b = append(b, []byte(name)...)
	return base64.StdEncoding.EncodeToString(b)
}
// isGCSMarker returns true if the marker was produced by this gateway,
// i.e. it carries the "{minio}" prefix added to GCS continuation tokens
// (as opposed to an application-supplied object-key marker).
func isGCSMarker(marker string) bool {
	return strings.HasPrefix(marker, gcsTokenPrefix)
}
// ListObjects - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
	if maxKeys == 0 {
		return minio.ListObjectsInfo{}, nil
	}
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})
	// To accommodate S3-compatible applications using
	// ListObjectsV1 to use object keys as markers to control the
	// listing of objects, we use the following encoding scheme to
	// distinguish between GCS continuation tokens and application
	// supplied markers.
	//
	// - NextMarker in ListObjectsV1 response is constructed by
	// prefixing "{minio}" to the GCS continuation token,
	// e.g, "{minio}CgRvYmoz"
	//
	// - Application supplied markers are transformed to a
	// GCS continuation token.
	// If application is using GCS continuation token we should
	// strip the gcsTokenPrefix we added.
	token := ""
	if marker != "" {
		if isGCSMarker(marker) {
			token = strings.TrimPrefix(marker, gcsTokenPrefix)
		} else {
			token = toGCSPageToken(marker)
		}
	}
	nextMarker := ""
	var prefixes []string
	var objects []minio.ObjectInfo
	var nextPageToken string
	var err error
	// Page through results until at least one returnable item is found
	// (skipped minio.sys.tmp entries can leave a page empty).
	pager := iterator.NewPager(it, maxKeys, token)
	for {
		gcsObjects := make([]*storage.ObjectAttrs, 0)
		nextPageToken, err = pager.NextPage(&gcsObjects)
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
		}
		for _, attrs := range gcsObjects {
			// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
			// returned may not total maxKeys. This behavior is compatible with the S3 spec which
			// allows the response to include less keys than maxKeys.
			if attrs.Prefix == minio.GatewayMinioSysTmp {
				// We don't return our metadata prefix.
				continue
			}
			if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
				// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
				// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
				// which will be helpful to observe the "directory structure" for debugging purposes.
				if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
					strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
					continue
				}
			}
			// A non-empty Prefix attribute denotes a "directory" entry.
			if attrs.Prefix != "" {
				prefixes = append(prefixes, attrs.Prefix)
			} else {
				objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
			}
			// The NextMarker property should only be set in the response if a delimiter is used
			if delimiter != "" {
				if attrs.Prefix > nextMarker {
					nextMarker = attrs.Prefix
				} else if attrs.Name > nextMarker {
					nextMarker = attrs.Name
				}
			}
		}
		// Exit the loop if at least one item can be returned from
		// the current page or there are no more pages available
		if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
			break
		}
	}
	if nextPageToken == "" {
		nextMarker = ""
	} else if nextMarker != "" {
		// Tag the marker so a future call recognizes it as ours.
		nextMarker = gcsTokenPrefix + toGCSPageToken(nextMarker)
	}
	return minio.ListObjectsInfo{
		IsTruncated: nextPageToken != "",
		NextMarker:  nextMarker,
		Prefixes:    prefixes,
		Objects:     objects,
	}, nil
}
// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix
func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
	if maxKeys == 0 {
		return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, nil
	}
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{
		Delimiter: delimiter,
		Prefix:    prefix,
		Versions:  false,
	})
	// Prefer an explicit continuation token; otherwise convert the
	// client-supplied start-after key into an equivalent GCS page token.
	token := continuationToken
	if token == "" && startAfter != "" {
		token = toGCSPageToken(startAfter)
	}
	var prefixes []string
	var objects []minio.ObjectInfo
	var nextPageToken string
	var err error
	// Page through results until at least one returnable item is found
	// (skipped minio.sys.tmp entries can leave a page empty).
	pager := iterator.NewPager(it, maxKeys, token)
	for {
		gcsObjects := make([]*storage.ObjectAttrs, 0)
		nextPageToken, err = pager.NextPage(&gcsObjects)
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix)
		}
		for _, attrs := range gcsObjects {
			// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
			// returned may not total maxKeys. This behavior is compatible with the S3 spec which
			// allows the response to include less keys than maxKeys.
			if attrs.Prefix == minio.GatewayMinioSysTmp {
				// We don't return our metadata prefix.
				continue
			}
			if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) {
				// If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries.
				// But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/
				// which will be helpful to observe the "directory structure" for debugging purposes.
				if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) ||
					strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) {
					continue
				}
			}
			// A non-empty Prefix attribute denotes a "directory" entry.
			if attrs.Prefix != "" {
				prefixes = append(prefixes, attrs.Prefix)
			} else {
				objects = append(objects, fromGCSAttrsToObjectInfo(attrs))
			}
		}
		// Exit the loop if at least one item can be returned from
		// the current page or there are no more pages available
		if nextPageToken == "" || len(prefixes)+len(objects) > 0 {
			break
		}
	}
	return minio.ListObjectsV2Info{
		IsTruncated:           nextPageToken != "",
		ContinuationToken:     continuationToken,
		NextContinuationToken: nextPageToken,
		Prefixes:              prefixes,
		Objects:               objects,
	}, nil
}
// GetObjectNInfo - returns object info and locked object ReadCloser
func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType) (gr *minio.GetObjectReader, err error) {
	var objInfo minio.ObjectInfo
	objInfo, err = l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
	if err != nil {
		return nil, err
	}
	// Resolve the requested range against the object's actual size.
	var startOffset, length int64
	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
	if err != nil {
		return nil, err
	}
	// Stream the object through a pipe so the caller receives a
	// ReadCloser; the writer side runs in its own goroutine and
	// propagates its final error (nil on success == clean EOF).
	pr, pw := io.Pipe()
	go func() {
		err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, minio.ObjectOptions{})
		pw.CloseWithError(err)
	}()
	// Setup cleanup function to cause the above go-routine to
	// exit in case of partial read
	pipeCloser := func() { pr.Close() }
	return minio.NewGetObjectReaderFromReader(pr, objInfo, pipeCloser), nil
}
// GetObject reads an object from GCS, streaming length bytes starting at
// startOffset to writer (synonymous with an HTTP Range request).
func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
	// Verify the bucket first so a missing bucket is reported as
	// BucketNotFound rather than ObjectNotFound (mimics S3).
	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket)
	}
	// ReadCompressed(true) disables GCS decompressive transcoding so a
	// gzip-stored object is downloaded in its stored, compressed form.
	// Refer https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding
	handle := l.client.Bucket(bucket).Object(key).ReadCompressed(true)
	reader, err := handle.NewRangeReader(l.ctx, startOffset, length)
	if err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket, key)
	}
	defer reader.Close()

	if _, err = io.Copy(writer, reader); err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket, key)
	}
	return nil
}
// fromGCSAttrsToObjectInfo converts GCS object attributes to gateway ObjectInfo.
func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo {
	// Copy custom metadata, translating the GCS prefix into the S3 one.
	metadata := make(map[string]string, len(attrs.Metadata))
	for key, value := range attrs.Metadata {
		key = http.CanonicalHeaderKey(key)
		if strings.HasPrefix(key, "X-Goog-Meta-") {
			key = strings.Replace(key, "X-Goog-Meta-", "X-Amz-Meta-", 1)
		}
		metadata[key] = value
	}
	// Mirror the standard headers into metadata when present.
	standard := map[string]string{
		"Content-Type":        attrs.ContentType,
		"Content-Encoding":    attrs.ContentEncoding,
		"Cache-Control":       attrs.CacheControl,
		"Content-Disposition": attrs.ContentDisposition,
		"Content-Language":    attrs.ContentLanguage,
	}
	for name, value := range standard {
		if value != "" {
			metadata[name] = value
		}
	}
	// Composite objects may lack an MD5, but every GCS object has a
	// CRC32C, so that is used for the ETag.
	// Refer https://cloud.google.com/storage/docs/hashes-etags
	return minio.ObjectInfo{
		Name:            attrs.Name,
		Bucket:          attrs.Bucket,
		ModTime:         attrs.Updated,
		Size:            attrs.Size,
		ETag:            minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)),
		UserDefined:     metadata,
		ContentType:     attrs.ContentType,
		ContentEncoding: attrs.ContentEncoding,
	}
}
// applyMetadataToGCSAttrs applies S3-style metadata onto a GCS
// ObjectAttrs instance, translating user-defined keys and mapping the
// standard headers onto their dedicated attribute fields.
func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAttrs) {
	attrs.Metadata = make(map[string]string)
	for key, value := range metadata {
		key = http.CanonicalHeaderKey(key)
		if strings.HasPrefix(key, "X-Amz-Meta-") {
			// Translate the S3 user-defined metadata prefix to the GCS one.
			attrs.Metadata[strings.Replace(key, "X-Amz-Meta-", "x-goog-meta-", 1)] = value
			continue
		}
		switch key {
		case "Content-Type":
			attrs.ContentType = value
		case "Content-Encoding":
			attrs.ContentEncoding = value
		case "Cache-Control":
			attrs.CacheControl = value
		case "Content-Disposition":
			attrs.ContentDisposition = value
		case "Content-Language":
			attrs.ContentLanguage = value
		}
	}
}
// GetObjectInfo - reads object info and replies back ObjectInfo
func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
	// otherwise gcs will just return object not exist in case of non-existing bucket
	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
	}
	attrs, err := l.client.Bucket(bucket).Object(object).Attrs(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
	}
	return fromGCSAttrsToObjectInfo(attrs), nil
}
// PutObject - Create a new object with the incoming data,
func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
	// otherwise gcs will just return object not exist in case of non-existing bucket
	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket)
	}
	object := l.client.Bucket(bucket).Object(key)
	w := object.NewWriter(l.ctx)
	// Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below
	// the current chunk-size of the writer. This avoids an unnecessary memory allocation.
	if data.Size() < int64(w.ChunkSize) {
		w.ChunkSize = 0
	}
	applyMetadataToGCSAttrs(metadata, &w.ObjectAttrs)
	if _, err := io.Copy(w, data); err != nil {
		// Close the object writer upon error.
		w.CloseWithError(err)
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	// BUGFIX: the Close error was previously ignored. The GCS writer only
	// commits the object when Close succeeds, so a failure here means the
	// upload did not complete and must be reported to the caller.
	if err := w.Close(); err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	attrs, err := object.Attrs(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	return fromGCSAttrsToObjectInfo(attrs), nil
}
// CopyObject copies a blob from the source bucket/object to the
// destination, carrying over the source's user-defined metadata.
func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
	source := l.client.Bucket(srcBucket).Object(srcObject)
	destination := l.client.Bucket(destBucket).Object(destObject)

	copier := destination.CopierFrom(source)
	applyMetadataToGCSAttrs(srcInfo.UserDefined, &copier.ObjectAttrs)

	attrs, err := copier.Run(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject)
	}
	return fromGCSAttrsToObjectInfo(attrs), nil
}
// DeleteObject deletes a blob from the given bucket.
func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error {
	if err := l.client.Bucket(bucket).Object(object).Delete(l.ctx); err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket, object)
	}
	return nil
}
// NewMultipartUpload starts a multipart upload by writing a gcs.json
// marker object recording the upload's bucket/key, and returns the newly
// generated upload id.
func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, metadata map[string]string, o minio.ObjectOptions) (uploadID string, err error) {
	// generate new uploadid
	uploadID = minio.MustGetUUID()

	// generate name for part zero
	meta := gcsMultipartMetaName(uploadID)

	w := l.client.Bucket(bucket).Object(meta).NewWriter(l.ctx)
	applyMetadataToGCSAttrs(metadata, &w.ObjectAttrs)
	if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{
		gcsMinioMultipartMetaCurrentVersion,
		bucket,
		key,
	}); err != nil {
		// Abort the upload so a partial/garbage meta object is not
		// committed (consistent with PutObject's error handling).
		w.CloseWithError(err)
		logger.LogIf(ctx, err)
		return "", gcsToObjectError(err, bucket, key)
	}
	// BUGFIX: previously closed via `defer w.Close()` with the error
	// ignored; the meta object is only committed when Close succeeds, so
	// a failure must be reported.
	if err = w.Close(); err != nil {
		logger.LogIf(ctx, err)
		return "", gcsToObjectError(err, bucket, key)
	}
	return uploadID, nil
}
// ListMultipartUploads - lists the (first) multipart upload for an object
// matched _exactly_ by the prefix
func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) {
	// List objects under <bucket>/gcsMinioMultipartPathV1
	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
		Prefix: gcsMinioMultipartPathV1,
	})

	var uploads []minio.MultipartInfo

	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListMultipartsInfo{
				KeyMarker:      keyMarker,
				UploadIDMarker: uploadIDMarker,
				MaxUploads:     maxUploads,
				Prefix:         prefix,
				Delimiter:      delimiter,
			}, gcsToObjectError(err)
		}

		// Skip entries other than gcs.json
		if !strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) {
			continue
		}

		// Extract multipart upload information from gcs.json.
		// BUGFIX: this previously deferred the reader Close inside the
		// loop, keeping every reader open until the function returned.
		mpMeta, mErr := l.readMultipartMeta(ctx, bucket, attrs.Name)
		if mErr != nil {
			return minio.ListMultipartsInfo{}, mErr
		}

		if prefix == mpMeta.Object {
			// Extract uploadId
			// E.g minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json
			components := strings.SplitN(attrs.Name, "/", 5)
			if len(components) != 5 {
				compErr := errors.New("Invalid multipart upload format")
				logger.LogIf(ctx, compErr)
				return minio.ListMultipartsInfo{}, compErr
			}
			upload := minio.MultipartInfo{
				Object:    mpMeta.Object,
				UploadID:  components[3],
				Initiated: attrs.Created,
			}
			uploads = []minio.MultipartInfo{upload}
			break
		}
	}

	return minio.ListMultipartsInfo{
		KeyMarker:          keyMarker,
		UploadIDMarker:     uploadIDMarker,
		MaxUploads:         maxUploads,
		Prefix:             prefix,
		Delimiter:          delimiter,
		Uploads:            uploads,
		NextKeyMarker:      "",
		NextUploadIDMarker: "",
		IsTruncated:        false,
	}, nil
}

// readMultipartMeta reads and decodes a single gcs.json marker object,
// closing its reader promptly before returning.
func (l *gcsGateway) readMultipartMeta(ctx context.Context, bucket, name string) (gcsMultipartMetaV1, error) {
	var mpMeta gcsMultipartMetaV1
	r, err := l.client.Bucket(bucket).Object(name).NewReader(ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return mpMeta, err
	}
	defer r.Close()
	if err := json.NewDecoder(r).Decode(&mpMeta); err != nil {
		logger.LogIf(ctx, err)
		return mpMeta, err
	}
	return mpMeta, nil
}
// Checks if minio.sys.tmp/multipart/v1/<upload-id>/gcs.json exists, returns
// an object layer compatible error upon any error.
// A missing marker object maps to InvalidUploadID via gcsToObjectError.
func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error {
	_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(l.ctx)
	logger.LogIf(ctx, err)
	return gcsToObjectError(err, bucket, key, uploadID)
}
// PutObjectPart puts a part of object in bucket
func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, data *hash.Reader, opts minio.ObjectOptions) (minio.PartInfo, error) {
	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
		return minio.PartInfo{}, err
	}
	etag := data.MD5HexString()
	if etag == "" {
		// Generate random ETag.
		etag = minio.GenETag()
	}
	object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
	w := object.NewWriter(l.ctx)
	// Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
	// where it tries to upload 0 bytes in the last chunk and get error from server.
	w.ChunkSize = 0
	if _, err := io.Copy(w, data); err != nil {
		// Make sure to close object writer upon error.
		w.Close()
		logger.LogIf(ctx, err)
		return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
	}
	// BUGFIX: the Close error was previously ignored. The part object is
	// only committed when Close succeeds, so a failure must be reported
	// instead of returning a PartInfo for a part that was never stored.
	if err := w.Close(); err != nil {
		logger.LogIf(ctx, err)
		return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
	}
	return minio.PartInfo{
		PartNumber:   partNumber,
		ETag:         etag,
		LastModified: minio.UTCNow(),
		Size:         data.Size(),
	}, nil
}
// gcsGetPartInfo parses a part object's attributes into a PartInfo.
// Part object names have the shape:
// minio.sys.tmp/multipart/v1/<uploadID>/<zero-padded part number>.<etag>
func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) {
	components := strings.SplitN(attrs.Name, "/", 5)
	if len(components) != 5 {
		err := errors.New("Invalid multipart upload format")
		logger.LogIf(ctx, err)
		return minio.PartInfo{}, err
	}

	// The final component is "<partNumber>.<etag>".
	partComps := strings.SplitN(components[4], ".", 2)
	if len(partComps) != 2 {
		err := errors.New("Invalid multipart part format")
		logger.LogIf(ctx, err)
		return minio.PartInfo{}, err
	}

	partNum, err := strconv.Atoi(partComps[0])
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.PartInfo{}, errors.New("Invalid part number")
	}

	return minio.PartInfo{
		PartNumber:   partNum,
		LastModified: attrs.Updated,
		Size:         attrs.Size,
		ETag:         partComps[1],
	}, nil
}
// ListObjectParts returns up to maxParts parts of the given multipart
// upload, starting after partNumberMarker.
func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int) (minio.ListPartsInfo, error) {
	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
		Prefix: path.Join(gcsMinioMultipartPathV1, uploadID),
	})

	var (
		count     int
		partInfos []minio.PartInfo
	)

	isTruncated := true
	for count < maxParts {
		attrs, err := it.Next()
		if err == iterator.Done {
			isTruncated = false
			break
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return minio.ListPartsInfo{}, gcsToObjectError(err)
		}
		// The gcs.json marker object is not a part.
		if strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) {
			continue
		}
		partInfo, pErr := gcsGetPartInfo(ctx, attrs)
		if pErr != nil {
			logger.LogIf(ctx, pErr)
			return minio.ListPartsInfo{}, pErr
		}
		if partInfo.PartNumber <= partNumberMarker {
			continue
		}
		partInfos = append(partInfos, partInfo)
		count++
	}

	nextPartNumberMarker := 0
	// BUGFIX: this previously indexed partInfos[maxParts-1], which panics
	// when maxParts == 0 (the loop body never runs, leaving the slice
	// empty). Guard on length and index by the actual number collected.
	if isTruncated && len(partInfos) > 0 {
		nextPartNumberMarker = partInfos[len(partInfos)-1].PartNumber
	}

	return minio.ListPartsInfo{
		Bucket:               bucket,
		Object:               key,
		UploadID:             uploadID,
		PartNumberMarker:     partNumberMarker,
		NextPartNumberMarker: nextPartNumberMarker,
		MaxParts:             maxParts,
		Parts:                partInfos,
		IsTruncated:          isTruncated,
	}, nil
}
// cleanupMultipartUpload deletes every temporary object stored under the
// upload's prefix. Called by AbortMultipartUpload and CompleteMultipartUpload
// for cleaning up.
//
// NOTE(review): listing and deletion run on l.ctx rather than the request
// ctx passed in — presumably so cleanup outlives request cancellation;
// confirm this is intentional.
func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error {
	prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID)

	// iterate through all parts and delete them
	it := l.client.Bucket(bucket).Objects(l.ctx, &storage.Query{Prefix: prefix, Versions: false})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			logger.LogIf(ctx, err)
			return gcsToObjectError(err, bucket, key)
		}

		object := l.client.Bucket(bucket).Object(attrs.Name)
		// Ignore the error as parallel AbortMultipartUpload might have deleted it.
		object.Delete(l.ctx)
	}

	return nil
}
// AbortMultipartUpload aborts a ongoing multipart upload.
//
// It first verifies the upload exists (via its metadata object), then
// removes every temporary object belonging to the upload.
func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, key string, uploadID string) error {
	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
		return err
	}
	return l.cleanupMultipartUpload(ctx, bucket, key, uploadID)
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
// Note that there is a limit (currently 32) to the number of components that can
// be composed in a single operation. There is a per-project rate limit (currently 200)
// to the number of source objects you can compose per second.
//
// Fixes: the minimum-part-size check sliced partSizes[:len-1], which panics
// when uploadedParts is empty (an indexed loop is safe for length 0); the
// per-part object name was also built twice — once for composing, once for
// Attrs — and is now built once.
func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart) (minio.ObjectInfo, error) {
	meta := gcsMultipartMetaName(uploadID)
	object := l.client.Bucket(bucket).Object(meta)

	partZeroAttrs, err := object.Attrs(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID)
	}

	r, err := object.NewReader(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	defer r.Close()

	// Check version compatibility of the meta file before compose()
	multipartMeta := gcsMultipartMetaV1{}
	if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}

	if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
		logger.LogIf(ctx, errGCSFormat)
		return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key)
	}

	// Validate if the gcs.json stores valid entries for the bucket and key.
	if multipartMeta.Bucket != bucket || multipartMeta.Object != key {
		return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{
			UploadID: uploadID,
		}, bucket, key)
	}

	var parts []*storage.ObjectHandle
	partSizes := make([]int64, len(uploadedParts))
	for i, uploadedPart := range uploadedParts {
		// Build the part's object handle once and reuse it for both the
		// compose list and the size lookup.
		part := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID,
			uploadedPart.PartNumber, uploadedPart.ETag))
		parts = append(parts, part)
		partAttr, pErr := part.Attrs(l.ctx)
		if pErr != nil {
			logger.LogIf(ctx, pErr)
			return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID)
		}
		partSizes[i] = partAttr.Size
	}

	// Error out if parts except last part sizing < 5MiB.
	// Indexed loop: safe when len(partSizes) == 0, unlike partSizes[:len-1].
	for i := 0; i < len(partSizes)-1; i++ {
		if partSizes[i] < 5*humanize.MiByte {
			logger.LogIf(ctx, minio.PartTooSmall{
				PartNumber: uploadedParts[i].PartNumber,
				PartSize:   partSizes[i],
				PartETag:   uploadedParts[i].ETag,
			})
			return minio.ObjectInfo{}, minio.PartTooSmall{
				PartNumber: uploadedParts[i].PartNumber,
				PartSize:   partSizes[i],
				PartETag:   uploadedParts[i].ETag,
			}
		}
	}

	// Returns name of the composed object.
	gcsMultipartComposeName := func(uploadID string, composeNumber int) string {
		return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber)
	}

	composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents)))
	if composeCount > 1 {
		// Create composes of every 32 parts.
		composeParts := make([]*storage.ObjectHandle, composeCount)
		for i := 0; i < composeCount; i++ {
			// Create 'composed-object-N' using next 32 parts.
			composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i))
			start := i * gcsMaxComponents
			end := start + gcsMaxComponents
			if end > len(parts) {
				end = len(parts)
			}

			composer := composeParts[i].ComposerFrom(parts[start:end]...)
			composer.ContentType = partZeroAttrs.ContentType
			composer.Metadata = partZeroAttrs.Metadata

			if _, err = composer.Run(l.ctx); err != nil {
				logger.LogIf(ctx, err)
				return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
			}
		}

		// As composes are successfully created, final object needs to be created using composes.
		parts = composeParts
	}

	composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...)
	composer.ContentType = partZeroAttrs.ContentType
	composer.ContentEncoding = partZeroAttrs.ContentEncoding
	composer.CacheControl = partZeroAttrs.CacheControl
	composer.ContentDisposition = partZeroAttrs.ContentDisposition
	composer.ContentLanguage = partZeroAttrs.ContentLanguage
	composer.Metadata = partZeroAttrs.Metadata
	attrs, err := composer.Run(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil {
		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
	}
	return fromGCSAttrsToObjectInfo(attrs), nil
}
// SetBucketPolicy - Set policy on bucket.
//
// GCS has no native S3-style bucket policies, so the policy is mapped onto
// a bucket ACL entry for storage.AllUsers. Only a single statement covering
// the whole bucket ("<bucket>/*") with a read-only, write-only or "none"
// policy can be represented; anything else returns NotImplemented.
func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
	policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
	if err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket)
	}

	var policies []minio.BucketAccessPolicy
	for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
		policies = append(policies, minio.BucketAccessPolicy{
			Prefix: prefix,
			Policy: policy,
		})
	}

	prefix := bucket + "/*" // For all objects inside the bucket.

	// Exactly one policy, and it must cover the whole bucket.
	if len(policies) != 1 {
		logger.LogIf(ctx, minio.NotImplemented{})
		return minio.NotImplemented{}
	}
	if policies[0].Prefix != prefix {
		logger.LogIf(ctx, minio.NotImplemented{})
		return minio.NotImplemented{}
	}

	acl := l.client.Bucket(bucket).ACL()
	if policies[0].Policy == miniogopolicy.BucketPolicyNone {
		// "None" revokes public access entirely.
		if err := acl.Delete(l.ctx, storage.AllUsers); err != nil {
			logger.LogIf(ctx, err)
			return gcsToObjectError(err, bucket)
		}
		return nil
	}

	// Map the S3-style policy to the closest GCS ACL role.
	var role storage.ACLRole
	switch policies[0].Policy {
	case miniogopolicy.BucketPolicyReadOnly:
		role = storage.RoleReader
	case miniogopolicy.BucketPolicyWriteOnly:
		role = storage.RoleWriter
	default:
		logger.LogIf(ctx, minio.NotImplemented{})
		return minio.NotImplemented{}
	}

	if err := acl.Set(l.ctx, storage.AllUsers, role); err != nil {
		logger.LogIf(ctx, err)
		return gcsToObjectError(err, bucket)
	}

	return nil
}
// GetBucketPolicy - Get policy on bucket.
//
// Reconstructs an S3-style policy from the bucket's GCS ACL entries for
// storage.AllUsers. Returns BucketPolicyNotFound when no public access is
// configured.
//
// Fix: the ACL list error is now logged via logger.LogIf, matching every
// other policy method in this gateway.
func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
	rules, err := l.client.Bucket(bucket).ACL().List(l.ctx)
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, gcsToObjectError(err, bucket)
	}

	// Only AllUsers grants are relevant; owner entries are skipped.
	var readOnly, writeOnly bool
	for _, r := range rules {
		if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner {
			continue
		}
		switch r.Role {
		case storage.RoleReader:
			readOnly = true
		case storage.RoleWriter:
			writeOnly = true
		}
	}

	// Translate the coarse reader/writer roles into the matching S3 actions.
	actionSet := policy.NewActionSet()
	if readOnly {
		actionSet.Add(policy.GetBucketLocationAction)
		actionSet.Add(policy.ListBucketAction)
		actionSet.Add(policy.GetObjectAction)
	}
	if writeOnly {
		actionSet.Add(policy.GetBucketLocationAction)
		actionSet.Add(policy.ListBucketMultipartUploadsAction)
		actionSet.Add(policy.AbortMultipartUploadAction)
		actionSet.Add(policy.DeleteObjectAction)
		actionSet.Add(policy.ListMultipartUploadPartsAction)
		actionSet.Add(policy.PutObjectAction)
	}

	// Return NoSuchBucketPolicy error, when policy is not set
	if len(actionSet) == 0 {
		return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket)
	}

	return &policy.Policy{
		Version: policy.DefaultVersion,
		Statements: []policy.Statement{
			policy.NewStatement(
				policy.Allow,
				policy.NewPrincipal("*"),
				actionSet,
				policy.NewResourceSet(
					policy.NewResource(bucket, ""),
					policy.NewResource(bucket, "*"),
				),
				condition.NewFunctions(),
			),
		},
	}, nil
}
// DeleteBucketPolicy - Delete all policies on bucket.
//
// Revokes public (AllUsers) access; ACL entries for other entities are
// left untouched.
func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	// This only removes the storage.AllUsers policies
	if err := l.client.Bucket(bucket).ACL().Delete(l.ctx, storage.AllUsers); err != nil {
		return gcsToObjectError(err, bucket)
	}
	return nil
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\"",
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
infrastructure-provisioning/src/ssn/fabfile.py | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import os
import sys
import traceback
import uuid
from datalab.fab import *
from fabric import *
@task
def run(ctx):
    """Provision the SSN node: run the prepare and configure scripts in order.

    Logs to a per-request file under /logs/<resource>/, generates a short
    unique index for this SSN instance, and aborts (exit 1) on the first
    failing step so configuration never runs against an unprepared node.
    """
    local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.DEBUG,
                        filename=local_log_filepath)

    ssn_config = dict()
    # Short random suffix used to disambiguate resources of this SSN instance.
    ssn_config['ssn_unique_index'] = str(uuid.uuid4())[:5]

    # The two steps only differ in script name and failure wording, so run
    # them from one loop instead of two copy-pasted try/except stanzas.
    for stage, script in (('preparing', 'ssn_prepare'), ('configuring', 'ssn_configure')):
        try:
            subprocess.run("~/scripts/{}.py --ssn_unique_index {}".format(
                script, ssn_config['ssn_unique_index']), shell=True, check=True)
        except Exception as err:
            traceback.print_exc()
            append_result("Failed {} SSN node.".format(stage), str(err))
            sys.exit(1)
@task
def terminate(ctx):
    """Terminate the SSN node, logging to the per-request log file.

    Exits with status 1 (after recording the failure via append_result)
    if the termination script fails.
    """
    local_log_filename = "{}_{}.log".format(os.environ['conf_resource'], os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
                        level=logging.DEBUG,
                        filename=local_log_filepath)
    try:
        # The actual teardown is delegated to the ssn_terminate script.
        subprocess.run("~/scripts/{}.py".format('ssn_terminate'), shell=True, check=True)
    except Exception as err:
        traceback.print_exc()
        append_result("Failed terminating SSN node.", str(err))
        sys.exit(1)
| []
| [
"request_id",
"conf_resource"
]
| [] | ["request_id", "conf_resource"] | python | 2 | 0 | |
app/openlxp_xia_edx_project/celery.py | import logging
import os
from celery import Celery
logger = logging.getLogger('dict_config_logger')
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'openlxp_xia_edx_project.settings')
app = Celery('openlxp_xia_edx_project')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
day2/tf2-dvc-cnn-evaluate.py |
# coding: utf-8
# # Dogs-vs-cats classification with CNNs
#
# In this notebook, we'll train a convolutional neural network (CNN,
# ConvNet) to classify images of dogs from images of cats using
# TensorFlow 2.0 / Keras. This notebook is largely based on the blog
# post [Building powerful image classification models using very
# little data]
# (https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)
# by François Chollet.
#
# **Note that using a GPU with this notebook is highly recommended.**
#
# First, the needed imports.
import os, datetime, sys
import random
import pathlib
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import (Dense, Activation, Dropout, Conv2D,
Flatten, MaxPooling2D, InputLayer)
from tensorflow.keras.preprocessing.image import (ImageDataGenerator,
array_to_img,
img_to_array, load_img)
from tensorflow.keras import applications, optimizers
from tensorflow.keras.callbacks import TensorBoard
import numpy as np
print('Using Tensorflow version:', tf.__version__,
'Keras version:', tf.keras.__version__,
'backend:', tf.keras.backend.backend())
# ## Data
#
# The test set consists of 22000 images.
# Data directory: honor $DATADIR when set, otherwise fall back to the
# project scratch default. os.environ.get() replaces the original
# membership-test-plus-index pattern.
DATADIR = os.environ.get('DATADIR', "/scratch/project_2003747/data/")

datapath = os.path.join(DATADIR, "dogs-vs-cats/train-2000/")

# Expected number of images per dataset split (sanity-checked in get_paths()).
nimages = dict()
nimages['test'] = 22000
# ### Image paths and labels
def get_paths(dataset):
    """Return the list of image file paths for the given dataset split.

    Globs ``datapath/<dataset>/<class>/*`` and asserts that the number of
    files found matches the expected count recorded in ``nimages``.
    """
    data_root = pathlib.Path(datapath+dataset)
    image_paths = list(data_root.glob('*/*'))
    image_paths = [str(path) for path in image_paths]
    image_count = len(image_paths)
    assert image_count == nimages[dataset], "Found {} images, expected {}".format(image_count, nimages[dataset])
    return image_paths
image_paths = dict()
image_paths['test'] = get_paths('test')

# Class names are the subdirectory names of the training set, mapped to
# integer labels in sorted order.
label_names = sorted(item.name for item in pathlib.Path(datapath+'train').glob('*/')
                     if item.is_dir())
label_to_index = dict((name, index) for index,name in enumerate(label_names))

def get_labels(dataset):
    # A sample's label is derived from the name of its parent directory.
    return [label_to_index[pathlib.Path(path).parent.name]
            for path in image_paths[dataset]]

image_labels = dict()
image_labels['test'] = get_labels('test')
# ### Data augmentation
#
# We need to resize all test images to a fixed size. Here we'll use
# 160x160 pixels.
#
# Unlike the training images, we do not apply any random
# transformations to the test images.
# Network input size: 160x160 RGB.
INPUT_IMAGE_SIZE = [160, 160, 3]

def preprocess_image(image, augment):
    """Decode a JPEG byte string; resize, optionally crop/flip, and scale to [0,1]."""
    image = tf.image.decode_jpeg(image, channels=3)
    if augment:
        image = tf.image.resize(image, [256, 256])
        image = tf.image.random_crop(image, INPUT_IMAGE_SIZE)
        # NOTE(review): random.random() is plain Python, so inside a traced
        # tf.data pipeline this branch is presumably decided once at trace
        # time rather than per image — consider tf.random; verify.
        if random.random() < 0.5:
            image = tf.image.flip_left_right(image)
    else:
        image = tf.image.resize(image, INPUT_IMAGE_SIZE[:2])
    image /= 255.0  # normalize to [0,1] range
    return image

def load_and_augment_image(path, label):
    # Read file contents and preprocess with augmentation (training use).
    image = tf.io.read_file(path)
    return preprocess_image(image, True), label

def load_and_not_augment_image(path, label):
    # Read file contents and preprocess without augmentation (evaluation use).
    image = tf.io.read_file(path)
    return preprocess_image(image, False), label
# ### TF Datasets
#
# Let's now define our TF Dataset
# (https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/data/Dataset#class_dataset)
# for the test data. First the Datasets contain the filenames of the
# images and the corresponding labels.
# Build the test Dataset from (path, label) pairs.
test_dataset = tf.data.Dataset.from_tensor_slices((image_paths['test'],
                                                   image_labels['test']))

# We then map() the filenames to the actual image data and decode the images.
BATCH_SIZE = 32

# Decode in parallel, keep the final partial batch, and prefetch so
# preprocessing overlaps with inference.
test_dataset = test_dataset.map(load_and_not_augment_image, num_parallel_calls=10)
test_dataset = test_dataset.batch(BATCH_SIZE, drop_remainder=False)
test_dataset = test_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# ### Initialization
#
# The model to evaluate is given as the first command-line argument.

if len(sys.argv)<2:
    print('ERROR: model file missing')
    sys.exit()

model = load_model(sys.argv[1])

print(model.summary())

# ### Inference
#
# Evaluate the loaded model on the test Dataset and report accuracy.

print('Evaluating model', sys.argv[1])
scores = model.evaluate(test_dataset, verbose=2)
print("Test set %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
| []
| []
| [
"DATADIR"
]
| [] | ["DATADIR"] | python | 1 | 0 | |
src/main/java/org/traccar/api/HealthCheckService.java | /*
* Copyright 2020 Anton Tananaev ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.api;
import com.sun.jna.Library;
import com.sun.jna.Native;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.traccar.Context;
import org.traccar.config.Keys;
import java.util.TimerTask;
/**
 * Integrates with the systemd watchdog: when enabled, a periodic task probes
 * the local web API and, on success, notifies systemd that the service is
 * alive, letting systemd restart the process if checks stop arriving.
 */
public class HealthCheckService {

    private static final Logger LOGGER = LoggerFactory.getLogger(HealthCheckService.class);

    // JNA binding to libsystemd; null when unavailable.
    private SystemD systemD;

    // True only on Linux with systemd loadable and WATCHDOG_USEC configured.
    private boolean enabled;
    // Notification period in milliseconds: 4/5 of the watchdog timeout, so
    // the keep-alive always arrives before systemd's deadline.
    private long period;

    public HealthCheckService() {
        if (!Context.getConfig().getBoolean(Keys.WEB_DISABLE_HEALTH_CHECK)
                && System.getProperty("os.name").toLowerCase().startsWith("linux")) {
            try {
                systemD = Native.load("systemd", SystemD.class);
                // WATCHDOG_USEC is the watchdog timeout in microseconds,
                // provided by systemd when a watchdog is configured.
                String watchdogTimer = System.getenv("WATCHDOG_USEC");
                if (watchdogTimer != null && !watchdogTimer.isEmpty()) {
                    period = Long.parseLong(watchdogTimer) / 1000 * 4 / 5;
                }
                if (period > 0) {
                    LOGGER.info("Health check enabled with period {}", period);
                    enabled = true;
                }
            } catch (UnsatisfiedLinkError e) {
                LOGGER.warn("No systemd support", e);
            }
        }
    }

    /** Returns true when watchdog notifications should be scheduled. */
    public boolean isEnabled() {
        return enabled;
    }

    /** Returns the notification period in milliseconds. */
    public long getPeriod() {
        return period;
    }

    // URL of the local server-info endpoint used as the health probe.
    private String getUrl() {
        String address = Context.getConfig().getString(Keys.WEB_ADDRESS, "localhost");
        int port = Context.getConfig().getInteger(Keys.WEB_PORT);
        return "http://" + address + ":" + port + "/api/server";
    }

    /**
     * Creates the timer task that probes the API and, on HTTP 200, sends the
     * systemd watchdog keep-alive notification.
     */
    public TimerTask createTask() {
        return new TimerTask() {
            @Override
            public void run() {
                LOGGER.debug("Health check running");
                int status = Context.getClient().target(getUrl()).request().get().getStatus();
                if (status == 200) {
                    int result = systemD.sd_notify(0, "WATCHDOG=1");
                    if (result < 0) {
                        LOGGER.warn("Health check notify error {}", result);
                    }
                } else {
                    LOGGER.warn("Health check failed with status {}", status);
                }
            }
        };
    }

    // Minimal JNA binding for libsystemd's sd_notify.
    interface SystemD extends Library {
        @SuppressWarnings("checkstyle:MethodName")
        int sd_notify(@SuppressWarnings("checkstyle:ParameterName") int unset_environment, String state);
    }

}
| [
"\"WATCHDOG_USEC\""
]
| []
| [
"WATCHDOG_USEC"
]
| [] | ["WATCHDOG_USEC"] | java | 1 | 0 | |
meusExemplos/kive_venv/Lib/site-packages/kivy_deps/glew/__init__.py |
"""The following code is required to make the dependency binaries available to
kivy when it imports this package.
"""
import sys
import os
from os.path import join, isdir, dirname
import site
__all__ = ('dep_bins', )
__version__ = '0.3.1'
dep_bins = []
"""A list of paths that contain the binaries of this distribution.
Can be used e.g. with pyinstaller to ensure it copies all the binaries.
"""
# Register the bundled GLEW binaries directory (under either the interpreter
# prefix or the user site base) so the libraries can be found at runtime.
for d in [sys.prefix, site.USER_BASE]:
    p = join(d, 'share', 'glew', 'bin')
    if isdir(p):
        # Prepend to PATH, and on Pythons that have it also call
        # os.add_dll_directory (required for DLL resolution on newer
        # Windows/Python combinations).
        os.environ["PATH"] = p + os.pathsep + os.environ["PATH"]
        if hasattr(os, 'add_dll_directory'):
            os.add_dll_directory(p)
        dep_bins.append(p)
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
sdk/identity/azure-identity/tests/test_managed_identity.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import time
try:
from unittest import mock
except ImportError: # python < 3.3
import mock # type: ignore
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from azure.identity import ManagedIdentityCredential
from azure.identity._constants import EnvironmentVariables
from azure.identity._credentials.imds import IMDS_AUTHORITY, IMDS_TOKEN_PATH
from azure.identity._internal.user_agent import USER_AGENT
import pytest
from helpers import build_aad_response, validating_transport, mock_response, Request
# Dotted path used to patch os.environ as seen by the managed identity credential.
MANAGED_IDENTITY_ENVIRON = "azure.identity._credentials.managed_identity.os.environ"

# One representative environment-variable configuration per managed identity host.
ALL_ENVIRONMENTS = (
    {EnvironmentVariables.MSI_ENDPOINT: "...", EnvironmentVariables.MSI_SECRET: "..."},  # App Service
    {EnvironmentVariables.MSI_ENDPOINT: "..."},  # Cloud Shell
    {  # Service Fabric
        EnvironmentVariables.IDENTITY_ENDPOINT: "...",
        EnvironmentVariables.IDENTITY_HEADER: "...",
        EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT: "...",
    },
    {EnvironmentVariables.IDENTITY_ENDPOINT: "...", EnvironmentVariables.IMDS_ENDPOINT: "..."},  # Arc
    {  # token exchange
        EnvironmentVariables.AZURE_CLIENT_ID: "...",
        EnvironmentVariables.AZURE_TENANT_ID: "...",
        EnvironmentVariables.AZURE_FEDERATED_TOKEN_FILE: __file__,
    },
    {},  # IMDS
)
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
def test_close(environ):
    # closing the credential should close its transport exactly once
    transport = mock.MagicMock()

    with mock.patch.dict("os.environ", environ, clear=True):
        credential = ManagedIdentityCredential(transport=transport)
    assert transport.__exit__.call_count == 0

    credential.close()
    assert transport.__exit__.call_count == 1


@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
def test_context_manager(environ):
    # entering/exiting the credential should enter/exit its transport
    transport = mock.MagicMock()

    with mock.patch.dict("os.environ", environ, clear=True):
        credential = ManagedIdentityCredential(transport=transport)

    with credential:
        assert transport.__enter__.call_count == 1
        assert transport.__exit__.call_count == 0

    assert transport.__enter__.call_count == 1
    assert transport.__exit__.call_count == 1


def test_close_incomplete_configuration():
    # close() must not raise even with no managed identity environment configured
    ManagedIdentityCredential().close()


def test_context_manager_incomplete_configuration():
    # context management must not raise even with no managed identity environment configured
    with ManagedIdentityCredential():
        pass
# NOTE(review): an identical, redundant redefinition of ALL_ENVIRONMENTS
# (the same six environment dicts as the definition near the top of this
# module) previously appeared here; it has been removed. All parametrized
# tests below use the single definition above.
@pytest.mark.parametrize("environ", ALL_ENVIRONMENTS)
def test_custom_hooks(environ):
    """The credential's pipeline should include azure-core's CustomHookPolicy"""
    scope = "scope"
    expected_token = "***"
    request_hook = mock.Mock()
    response_hook = mock.Mock()
    now = int(time.time())
    expected_response = mock_response(
        json_payload={
            "access_token": expected_token,
            "expires_in": 3600,
            "expires_on": now + 3600,
            "ext_expires_in": 3600,
            "not_before": now,
            "resource": scope,
            "token_type": "Bearer",
        }
    )
    transport = validating_transport(requests=[Request()] * 2, responses=[expected_response] * 2)

    with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, environ, clear=True):
        credential = ManagedIdentityCredential(
            transport=transport, raw_request_hook=request_hook, raw_response_hook=response_hook
        )
    credential.get_token(scope)

    if environ:
        # some environment variables are set, so we're not mocking IMDS and should expect 1 request
        assert request_hook.call_count == 1
        assert response_hook.call_count == 1
        args, kwargs = response_hook.call_args
        pipeline_response = args[0]
        assert pipeline_response.http_response == expected_response
    else:
        # we're mocking IMDS and should expect 2 requests (probe + token request)
        assert request_hook.call_count == 2
        assert response_hook.call_count == 2
        responses = [args[0].http_response for args, _ in response_hook.call_args_list]
        assert responses == [expected_response] * 2
def test_cloud_shell():
    """Cloud Shell environment: only MSI_ENDPOINT set"""

    access_token = "****"
    expires_on = 42
    expected_token = AccessToken(access_token, expires_on)
    endpoint = "http://localhost:42/token"
    scope = "scope"
    # Cloud Shell requests tokens with a POST carrying the resource in the form body.
    transport = validating_transport(
        requests=[
            Request(
                base_url=endpoint,
                method="POST",
                required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
                required_data={"resource": scope},
            )
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_in": 0,
                    "expires_on": expires_on,
                    "not_before": int(time.time()),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ],
    )

    with mock.patch("os.environ", {EnvironmentVariables.MSI_ENDPOINT: endpoint}):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
def test_azure_ml():
    """Azure ML: MSI_ENDPOINT, MSI_SECRET set (like App Service 2017-09-01 but with a different response format)"""

    expected_token = AccessToken("****", int(time.time()) + 3600)
    url = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"
    client_id = "client"

    transport = validating_transport(
        requests=[
            Request(
                url,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2017-09-01", "resource": scope},
            ),
            Request(
                url,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2017-09-01", "resource": scope, "clientid": client_id},
            ),
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": expected_token.token,
                    "expires_in": 3600,
                    "expires_on": expected_token.expires_on,
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ]
        * 2,
    )

    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON,
        {EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
        clear=True,
    ):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token.token == expected_token.token
        assert token.expires_on == expected_token.expires_on

        # a user-assigned identity is selected with the "clientid" query parameter
        token = ManagedIdentityCredential(transport=transport, client_id=client_id).get_token(scope)
        assert token.token == expected_token.token
        assert token.expires_on == expected_token.expires_on
def test_cloud_shell_user_assigned_identity():
    """Cloud Shell environment: only MSI_ENDPOINT set"""

    expected_token = "****"
    expires_on = 42
    client_id = "some-guid"
    endpoint = "http://localhost:42/token"
    scope = "scope"
    param_name, param_value = "foo", "bar"
    # First request selects the identity via client_id; second via an
    # arbitrary identity_config key/value pair.
    transport = validating_transport(
        requests=[
            Request(
                base_url=endpoint,
                method="POST",
                required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
                required_data={"client_id": client_id, "resource": scope},
            ),
            Request(
                base_url=endpoint,
                method="POST",
                required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
                required_data={"resource": scope, param_name: param_value},
            ),
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": expected_token,
                    "expires_in": 0,
                    "expires_on": expires_on,
                    "not_before": int(time.time()),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ]
        * 2,
    )

    with mock.patch.dict(MANAGED_IDENTITY_ENVIRON, {EnvironmentVariables.MSI_ENDPOINT: endpoint}, clear=True):
        token = ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
        assert token.token == expected_token
        assert token.expires_on == expires_on

        credential = ManagedIdentityCredential(transport=transport, identity_config={param_name: param_value})
        token = credential.get_token(scope)
        assert token.token == expected_token
        assert token.expires_on == expires_on
def test_prefers_app_service_2017_09_01():
    """When the environment is configured for both App Service versions, the credential should prefer 2017-09-01

    Support for 2019-08-01 was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test
    should be removed when that support is added back.
    """
    access_token = "****"
    expires_on = 42
    expected_token = AccessToken(access_token, expires_on)
    url = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"
    # Two responses exercise both platform-dependent expires_on string formats.
    transport = validating_transport(
        requests=[
            Request(
                url,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2017-09-01", "resource": scope},
            )
        ]
        * 2,
        responses=[
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on),  # linux format
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on),  # windows format
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
        ],
    )

    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON,
        {
            EnvironmentVariables.IDENTITY_ENDPOINT: url,
            EnvironmentVariables.IDENTITY_HEADER: secret,
            EnvironmentVariables.MSI_ENDPOINT: url,
            EnvironmentVariables.MSI_SECRET: secret,
        },
        clear=True,
    ):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
        assert token.expires_on == expires_on

        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
        assert token.expires_on == expires_on
@pytest.mark.skip(
    "2019-08-01 support was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test should be enabled when that support is added back."
)
def test_prefers_app_service_2019_08_01():
    """When the environment is configured for both App Service versions, the credential should prefer the most recent"""
    access_token = "****"
    expires_on = 42
    endpoint = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"
    transport = validating_transport(
        requests=[
            Request(
                base_url=endpoint,
                method="GET",
                required_headers={"X-IDENTITY-HEADER": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2019-08-01", "resource": scope},
            )
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": str(expires_on),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ],
    )

    # Both API versions' variables are present; only a 2019-08-01 request is expected.
    environ = {
        EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
        EnvironmentVariables.IDENTITY_HEADER: secret,
        EnvironmentVariables.MSI_ENDPOINT: endpoint,
        EnvironmentVariables.MSI_SECRET: secret,
    }
    with mock.patch.dict("os.environ", environ, clear=True):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
    assert token.token == access_token
    assert token.expires_on == expires_on
@pytest.mark.skip(
    "2019-08-01 support was removed due to https://github.com/Azure/azure-sdk-for-python/issues/14670. This test should be enabled when that support is added back."
)
def test_app_service_2019_08_01():
    """App Service 2019-08-01: IDENTITY_ENDPOINT, IDENTITY_HEADER set"""
    access_token = "****"
    expires_on = 42
    endpoint = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"

    def send(request, **_):
        # Validate the request against the 2019-08-01 protocol before responding.
        assert request.url.startswith(endpoint)
        assert request.method == "GET"
        assert request.headers["X-IDENTITY-HEADER"] == secret
        assert request.headers["User-Agent"] == USER_AGENT
        assert request.query["api-version"] == "2019-08-01"
        assert request.query["resource"] == scope

        return mock_response(
            json_payload={
                "access_token": access_token,
                "expires_on": str(expires_on),
                "resource": scope,
                "token_type": "Bearer",
            }
        )

    # when configuration for both API versions is present, the credential should prefer the most recent
    for environment in [
        {EnvironmentVariables.IDENTITY_ENDPOINT: endpoint, EnvironmentVariables.IDENTITY_HEADER: secret},
        {
            EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
            EnvironmentVariables.IDENTITY_HEADER: secret,
            EnvironmentVariables.MSI_ENDPOINT: endpoint,
            EnvironmentVariables.MSI_SECRET: secret,
        },
    ]:
        with mock.patch.dict("os.environ", environment, clear=True):
            token = ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
            assert token.token == access_token
            assert token.expires_on == expires_on
def test_app_service_2017_09_01():
    """test parsing of App Service MSI 2017-09-01's eccentric platform-dependent expires_on strings"""
    access_token = "****"
    expires_on = 42
    expected_token = AccessToken(access_token, expires_on)
    url = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"

    # Two identical requests are expected: one per expires_on format exercised below.
    transport = validating_transport(
        requests=[
            Request(
                url,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2017-09-01", "resource": scope},
            )
        ]
        * 2,
        responses=[
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on),  # linux format
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": "1/1/1970 12:00:{} AM +00:00".format(expires_on),  # windows format
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
        ],
    )

    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON,
        {EnvironmentVariables.MSI_ENDPOINT: url, EnvironmentVariables.MSI_SECRET: secret},
        clear=True,
    ):
        # Both platform-specific date strings must parse to the same epoch value.
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
        assert token.expires_on == expires_on

        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
        assert token.expires_on == expires_on
def test_app_service_user_assigned_identity():
    """App Service 2017-09-01: MSI_ENDPOINT, MSI_SECRET set"""
    expected_token = "****"
    expires_on = 42
    client_id = "some-guid"
    endpoint = "http://localhost:42/token"
    secret = "expected-secret"
    scope = "scope"
    param_name, param_value = "foo", "bar"

    # Two requests: one plain user-assigned-identity request, one carrying an
    # extra identity_config parameter.
    transport = validating_transport(
        requests=[
            Request(
                base_url=endpoint,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={"api-version": "2017-09-01", "clientid": client_id, "resource": scope},
            ),
            Request(
                base_url=endpoint,
                method="GET",
                required_headers={"secret": secret, "User-Agent": USER_AGENT},
                required_params={
                    "api-version": "2017-09-01",
                    "clientid": client_id,
                    "resource": scope,
                    param_name: param_value,
                },
            ),
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": expected_token,
                    "expires_on": "01/01/1970 00:00:{} +00:00".format(expires_on),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ]
        * 2,
    )

    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON,
        {EnvironmentVariables.MSI_ENDPOINT: endpoint, EnvironmentVariables.MSI_SECRET: secret},
        clear=True,
    ):
        token = ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
        assert token.token == expected_token
        assert token.expires_on == expires_on

        # identity_config entries should be forwarded as extra query parameters
        credential = ManagedIdentityCredential(
            client_id=client_id, transport=transport, identity_config={param_name: param_value}
        )
        token = credential.get_token(scope)
        assert token.token == expected_token
        assert token.expires_on == expires_on
def test_imds():
    """IMDS: token acquired via the instance metadata endpoint when no MSI env vars are set"""
    access_token = "****"
    expires_on = 42
    expected_token = AccessToken(access_token, expires_on)
    scope = "scope"
    transport = validating_transport(
        requests=[
            # first request is the credential's availability probe (URL only)
            Request(base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH),
            Request(
                base_url=IMDS_AUTHORITY + IMDS_TOKEN_PATH,
                method="GET",
                required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
                required_params={"api-version": "2018-02-01", "resource": scope},
            ),
        ],
        responses=[
            # probe receives error response
            mock_response(status_code=400, json_payload={"error": "this is an error message"}),
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_in": 42,
                    "expires_on": expires_on,
                    "ext_expires_in": 42,
                    "not_before": int(time.time()),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
        ],
    )

    # ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
    with mock.patch.dict("os.environ", clear=True):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token == expected_token
def test_client_id_none():
    """the credential should ignore client_id=None"""
    expected_access_token = "****"
    scope = "scope"

    def send(request, **_):
        # no user-assigned identity id should appear anywhere in the request
        assert "client_id" not in request.query  # IMDS
        assert "clientid" not in request.query  # App Service 2017-09-01
        if request.data:
            assert "client_id" not in request.body  # Cloud Shell
        return mock_response(
            json_payload=(build_aad_response(access_token=expected_access_token, expires_on="42", resource=scope))
        )

    # IMDS
    credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
    token = credential.get_token(scope)
    assert token.token == expected_access_token

    # Cloud Shell
    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON, {EnvironmentVariables.MSI_ENDPOINT: "https://localhost"}, clear=True
    ):
        credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
        token = credential.get_token(scope)
        assert token.token == expected_access_token
def test_client_id_none_app_service_2017_09_01():
    """The credential should ignore client_id=None.

    App Service 2017-09-01 must be tested separately due to its eccentric expires_on format.
    """
    expected_access_token = "****"
    scope = "scope"

    def send(request, **_):
        # the user-assigned identity id must not be forwarded
        assert "client_id" not in request.query
        assert "clientid" not in request.query
        return mock_response(
            json_payload=(
                build_aad_response(
                    access_token=expected_access_token, expires_on="01/01/1970 00:00:42 +00:00", resource=scope
                )
            )
        )

    with mock.patch.dict(
        MANAGED_IDENTITY_ENVIRON,
        {EnvironmentVariables.MSI_ENDPOINT: "https://localhost", EnvironmentVariables.MSI_SECRET: "secret"},
        clear=True,
    ):
        credential = ManagedIdentityCredential(client_id=None, transport=mock.Mock(send=send))
        token = credential.get_token(scope)
        assert token.token == expected_access_token
def test_imds_user_assigned_identity():
    """IMDS: client_id is forwarded as a 'client_id' query parameter"""
    access_token = "****"
    expires_on = 42
    expected_token = AccessToken(access_token, expires_on)
    endpoint = IMDS_AUTHORITY + IMDS_TOKEN_PATH
    scope = "scope"
    client_id = "some-guid"
    transport = validating_transport(
        requests=[
            Request(base_url=endpoint),  # first request should be availability probe => match only the URL
            Request(
                base_url=endpoint,
                method="GET",
                required_headers={"Metadata": "true", "User-Agent": USER_AGENT},
                required_params={"api-version": "2018-02-01", "client_id": client_id, "resource": scope},
            ),
        ],
        responses=[
            # probe receives error response
            mock_response(status_code=400, json_payload={"error": "this is an error message"}),
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "client_id": client_id,
                    "expires_in": 42,
                    "expires_on": expires_on,
                    "ext_expires_in": 42,
                    "not_before": int(time.time()),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
        ],
    )

    # ensure e.g. $MSI_ENDPOINT isn't set, so we get ImdsCredential
    with mock.patch.dict("os.environ", clear=True):
        token = ManagedIdentityCredential(client_id=client_id, transport=transport).get_token(scope)
        assert token == expected_token
def test_service_fabric():
    """Service Fabric 2019-07-01-preview"""
    access_token = "****"
    expires_on = 42
    endpoint = "http://localhost:42/token"
    secret = "expected-secret"
    thumbprint = "SHA1HEX"
    scope = "scope"

    def send(request, **_):
        # Mock transport: validate the Service Fabric token request, then
        # return a canned token response.
        assert request.url.startswith(endpoint)
        assert request.method == "GET"
        assert request.headers["Secret"] == secret
        assert request.query["api-version"] == "2019-07-01-preview"
        assert request.query["resource"] == scope
        return mock_response(
            json_payload={
                "access_token": access_token,
                "expires_on": str(expires_on),
                "resource": scope,
                "token_type": "Bearer",
            }
        )

    # Use patch.dict so os.environ itself is modified in place and restored,
    # rather than replaced by a plain dict object -- consistent with the other
    # tests in this module (clear=True gives the same empty-baseline semantics).
    with mock.patch.dict(
        "os.environ",
        {
            EnvironmentVariables.IDENTITY_ENDPOINT: endpoint,
            EnvironmentVariables.IDENTITY_HEADER: secret,
            EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT: thumbprint,
        },
        clear=True,
    ):
        token = ManagedIdentityCredential(transport=mock.Mock(send=send)).get_token(scope)
        assert token.token == access_token
        assert token.expires_on == expires_on
def test_azure_arc(tmpdir):
    """Azure Arc 2019-11-01"""
    access_token = "****"
    api_version = "2019-11-01"
    expires_on = 42
    identity_endpoint = "http://localhost:42/token"
    imds_endpoint = "http://localhost:42"
    scope = "scope"
    secret_key = "XXXX"

    # Arc's challenge protocol points the client at a key file on disk; create one.
    key_file = tmpdir.mkdir("key").join("key_file.key")
    key_file.write(secret_key)
    assert key_file.read() == secret_key
    key_path = os.path.join(key_file.dirname, key_file.basename)

    transport = validating_transport(
        requests=[
            # first request carries no Authorization header...
            Request(
                base_url=identity_endpoint,
                method="GET",
                required_headers={"Metadata": "true"},
                required_params={"api-version": api_version, "resource": scope},
            ),
            # ...the retry presents the key from the 401 challenge as Basic auth
            Request(
                base_url=identity_endpoint,
                method="GET",
                required_headers={"Metadata": "true", "Authorization": "Basic {}".format(secret_key)},
                required_params={"api-version": api_version, "resource": scope},
            ),
        ],
        responses=[
            # first response gives path to authentication key
            mock_response(status_code=401, headers={"WWW-Authenticate": "Basic realm={}".format(key_path)}),
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_on": expires_on,
                    "resource": scope,
                    "token_type": "Bearer",
                }
            ),
        ],
    )

    # Use patch.dict so os.environ itself is modified in place and restored,
    # rather than replaced by a plain dict object -- consistent with the other
    # tests in this module (clear=True gives the same empty-baseline semantics).
    with mock.patch.dict(
        "os.environ",
        {EnvironmentVariables.IDENTITY_ENDPOINT: identity_endpoint, EnvironmentVariables.IMDS_ENDPOINT: imds_endpoint},
        clear=True,
    ):
        token = ManagedIdentityCredential(transport=transport).get_token(scope)
        assert token.token == access_token
        assert token.expires_on == expires_on
def test_azure_arc_client_id():
    """Azure Arc doesn't support user-assigned managed identity"""
    # Use patch.dict so os.environ is modified in place and restored, rather
    # than replaced by a plain dict -- consistent with the rest of this module.
    with mock.patch.dict(
        "os.environ",
        {
            EnvironmentVariables.IDENTITY_ENDPOINT: "http://localhost:42/token",
            EnvironmentVariables.IMDS_ENDPOINT: "http://localhost:42",
        },
        clear=True,
    ):
        credential = ManagedIdentityCredential(client_id="some-guid")
        # passing a client_id must fail loudly instead of being silently ignored
        with pytest.raises(ClientAuthenticationError):
            credential.get_token("scope")
def test_token_exchange(tmpdir):
    """Token exchange (workload identity): the file named by
    AZURE_FEDERATED_TOKEN_FILE is read and sent as a client assertion."""
    exchange_token = "exchange-token"
    token_file = tmpdir.join("token")
    token_file.write(exchange_token)
    access_token = "***"
    authority = "https://localhost"
    client_id = "client_id"
    tenant = "tenant_id"
    scope = "scope"
    transport = validating_transport(
        requests=[
            Request(
                base_url=authority,
                method="POST",
                required_data={
                    "client_assertion": exchange_token,
                    "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
                    "client_id": client_id,
                    "grant_type": "client_credentials",
                    "scope": scope,
                },
            )
        ],
        responses=[
            mock_response(
                json_payload={
                    "access_token": access_token,
                    "expires_in": 3600,
                    "ext_expires_in": 3600,
                    "expires_on": int(time.time()) + 3600,
                    "not_before": int(time.time()),
                    "resource": scope,
                    "token_type": "Bearer",
                }
            )
        ],
    )
    # all four env vars must be present for the exchange-token code path
    with mock.patch.dict(
        "os.environ",
        {
            EnvironmentVariables.AZURE_AUTHORITY_HOST: authority,
            EnvironmentVariables.AZURE_CLIENT_ID: client_id,
            EnvironmentVariables.AZURE_TENANT_ID: tenant,
            EnvironmentVariables.AZURE_FEDERATED_TOKEN_FILE: token_file.strpath,
        },
        clear=True,
    ):
        credential = ManagedIdentityCredential(transport=transport)
        token = credential.get_token(scope)
        assert token.token == access_token
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/line/line-bot-sdk-go/linebot"
)
var bot *linebot.Client
// main wires up the LINE bot from ChannelSecret/ChannelAccessToken env vars
// and serves the webhook callback on $PORT. Startup errors are fatal: the
// original code logged the linebot.New error and continued with a nil bot
// (which would panic on the first callback), and dropped the ListenAndServe
// error entirely.
func main() {
	var err error
	bot, err = linebot.New(os.Getenv("ChannelSecret"), os.Getenv("ChannelAccessToken"))
	if err != nil {
		log.Fatal("linebot.New: ", err)
	}
	log.Println("Bot:", bot)
	http.HandleFunc("/callback", callbackHandler)
	port := os.Getenv("PORT")
	addr := fmt.Sprintf(":%s", port)
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
// callbackHandler handles LINE webhook callbacks: it parses/validates the
// request via bot.ParseRequest and, for text messages reading "MyID", replies
// with the sender's LINE user ID.
func callbackHandler(w http.ResponseWriter, r *http.Request) {
	events, err := bot.ParseRequest(r)
	if err != nil {
		// 400 for a bad signature, 500 for any other parse failure.
		if err == linebot.ErrInvalidSignature {
			w.WriteHeader(400)
		} else {
			w.WriteHeader(500)
		}
		return
	}

	for _, event := range events {
		if event.Type == linebot.EventTypeMessage {
			switch message := event.Message.(type) {
			case *linebot.TextMessage:
				// Only the literal text "MyID" triggers a reply.
				if message.Text == "MyID" {
					if _, err = bot.ReplyMessage(event.ReplyToken, linebot.NewTextMessage( "Line_ID:"+event.Source.UserID+"已取得,請傳給九桃")).Do(); err != nil {
						log.Print(err)
					}
				}
			}
		}
	}
}
| [
"\"ChannelSecret\"",
"\"ChannelAccessToken\"",
"\"PORT\""
]
| []
| [
"PORT",
"ChannelSecret",
"ChannelAccessToken"
]
| [] | ["PORT", "ChannelSecret", "ChannelAccessToken"] | go | 3 | 0 | |
tools/launch_ccm_cluster.py | #!/usr/bin/env python3
# Launches a CCM cluster
#
# stdout:
# {'id': ...
# 'url': ...,
# 'auth_token': ...}
#
# cluster.properties file (if WORKSPACE is set in env):
# CLUSTER_ID=...
# CLUSTER_URL=...
# CLUSTER_AUTH_TOKEN=...
#
# Configuration: Mostly through env vars. See README.md.
import argparse
import http.client
import json
import logging
import os
import pprint
import random
import socket
import string
import sys
import time
import configure_test_cluster
import dcos_login
import github_update
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ClusterActionException(Exception):
    """Raised when a CCM cluster start/stop/wait operation fails."""
class CCMLauncher(object):
    """Thin client for the CCM (Cloud Cluster Manager) REST API.

    Supports launching, deleting, and polling DC/OS test clusters, wrapping
    each operation with retries and GitHub commit-status updates. Set DRY_RUN
    in the environment to log requests instead of sending them.
    """

    # NOTE: this will need to be updated once 'stable' is no longer 1.7
    _DCOS_17_CHANNELS = ['testing/continuous', 'stable']

    # From mesosphere/cloud-cluster-manager/app/models.py:
    _CCM_STATUSES = {
        0: 'RUNNING',
        3: 'CREATING',
        4: 'DELETING',
        5: 'DELETED',
        6: 'DELETION_FAIL',
        7: 'CREATING_ERROR',
        8: 'RUNNING_NEEDS_INFO'
    }
    # Reverse (name => number):
    _CCM_STATUS_LABELS = {v: k for k, v in _CCM_STATUSES.items()}

    _CCM_HOST = 'ccm.mesosphere.com'
    _CCM_PATH = '/api/cluster/'
    DEFAULT_TIMEOUT_MINS = 45
    DEFAULT_ATTEMPTS = 2

    def __init__(self, ccm_token, github_label):
        """ccm_token: CCM API token (sent as an Authorization header).
        github_label: suffix for the 'cluster:<label>' GitHub status context."""
        self._http_headers = {'Authorization': 'Token ' + ccm_token}
        # Any non-empty DRY_RUN value suppresses real HTTP requests.
        self._dry_run = os.environ.get('DRY_RUN', '')
        self._github_updater = github_update.GithubStatusUpdater('cluster:{}'.format(github_label))

    def _rand_str(self, size):
        """Return a random lowercase-alphanumeric string of the given length."""
        return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(size))

    def _pretty_time(self, seconds):
        """Format a duration in seconds as e.g. '2m30s', or '45s' under a minute."""
        if seconds > 60:
            disp_seconds = seconds % 60
            return '{:.0f}m{:.0f}s'.format((seconds - disp_seconds) / 60, disp_seconds)
        else:
            return '{:.0f}s'.format(seconds)

    def _retry(self, attempts, method, arg, operation_name):
        """Call method(arg) up to 'attempts' times, posting pending/success/error
        GitHub statuses around each try. Returns the first successful result;
        re-raises the last ClusterActionException/socket.error when all fail."""
        for i in range(attempts):
            attempt_str = '[{}/{}]'.format(i + 1, attempts)
            try:
                self._github_updater.update('pending', '{} {} in progress'.format(attempt_str, operation_name.title()))
                result = method.__call__(arg)
                self._github_updater.update('success', '{} {} succeeded'.format(attempt_str, operation_name.title()))
                return result
            except (ClusterActionException, socket.error) as e:
                if i + 1 == attempts:
                    logger.error('{} Final attempt failed, giving up: {}'.format(attempt_str, e))
                    self._github_updater.update('error', '{} {} failed'.format(attempt_str, operation_name.title()))
                    raise
                else:
                    logger.error('{} Previous attempt failed, retrying: {}\n'.format(attempt_str, e))

    def _query_http(self, request_method, request_path,
                    request_json_payload=None,
                    log_error=True,
                    debug=False):
        """Send one HTTPS request to the CCM host.

        Returns the http.client response object, or None either in dry-run
        mode or when the status is outside 2xx and log_error is set (after
        logging the failure details).
        """
        if self._dry_run:
            logger.info('[DRY RUN] {} https://{}{}'.format(request_method, self._CCM_HOST, request_path))
            if request_json_payload:
                logger.info('[DRY RUN] Payload: {}'.format(pprint.pformat(request_json_payload)))
            return None
        conn = http.client.HTTPSConnection(self._CCM_HOST)
        if debug:
            conn.set_debuglevel(999)
        request_headers = self._http_headers.copy()
        if request_json_payload:
            request_body = json.dumps(request_json_payload).encode('utf-8')
            request_headers['Content-Type'] = 'application/json'
        else:
            request_body = None
        conn.request(
            request_method,
            request_path,
            body = request_body,
            headers = request_headers)
        response = conn.getresponse()
        if log_error and (response.status < 200 or response.status >= 300):
            logger.error('Got {} response to HTTP request:'.format(response.status))
            logger.error('Request: {} https://{}{}'.format(request_method, self._CCM_HOST, request_path))
            logger.error('Response:')
            logger.error(' - Status: {} {}'.format(response.status, str(response.msg).strip()))
            logger.error(' - Headers: {}'.format(pprint.pformat(response.getheaders())))
            logger.error(' - Body: {}'.format(pprint.pformat(response.read())))
            return None
        elif debug:
            logger.debug('{}: {}'.format(response.status, str(response.msg).strip()))
            logger.debug(pprint.pformat(response.getheaders()))
        return response

    def wait_for_status(self, cluster_id, pending_status_labels, complete_status_label, timeout_minutes):
        """Poll CCM (exponential backoff, capped at 32s) until the cluster
        reaches complete_status_label.

        Returns the parsed cluster_info dict on success; None on timeout, on
        an unexpected state, or when cluster_info is missing/unparseable.
        """
        logger.info('Waiting {} minutes for cluster {} to transition from {} to {}'.format(
            timeout_minutes, cluster_id, ', '.join(pending_status_labels), complete_status_label))
        pending_state_codes = [self._CCM_STATUS_LABELS[label] for label in pending_status_labels]
        complete_state_code = self._CCM_STATUS_LABELS[complete_status_label]
        start_time = time.time()
        stop_time = start_time + (60 * timeout_minutes)
        sleep_duration_s = 1
        now = start_time
        while now < stop_time:
            if sleep_duration_s < 32:
                sleep_duration_s *= 2
            response = self._query_http('GET', self._CCM_PATH + str(cluster_id) + '/')
            if response:
                status_json = json.loads(response.read().decode('utf-8'))
                status_code = status_json.get('status', -1)
                status_label = self._CCM_STATUSES.get(status_code, 'unknown:{}'.format(status_code))
                if status_code == complete_state_code:
                    # additional check: does the cluster have a non-empty 'cluster_info'?
                    cluster_info_str = status_json.get('cluster_info', '')
                    if cluster_info_str:
                        # cluster_info in the CCM API is a string containing a dict...:
                        logger.info('Cluster {} has entered state {}, returning cluster_info.'.format(
                            cluster_id, status_label))
                        try:
                            return json.loads(cluster_info_str)
                        except:
                            logger.error('Failed to parse cluster_info string as JSON. Operation failed?: "{}"'.format(cluster_info_str))
                            return None
                    else:
                        # Target state reached but no info yet; keep polling.
                        logger.error('Cluster {} has entered state {}, but lacks cluster_info...'.format(
                            cluster_id, status_label))
                elif status_code not in pending_state_codes:
                    logger.error('Cluster {} has entered state {}. Giving up.'.format(
                        cluster_id, status_label))
                    return None
                logger.info('Cluster {} has state {} after {}, refreshing in {}. ({} left)'.format(
                    cluster_id,
                    status_label,
                    self._pretty_time(now - start_time),
                    self._pretty_time(sleep_duration_s),
                    self._pretty_time(stop_time - now)))
            else:
                logger.error('Failed to get cluster {} state after {}, refreshing in {}. ({} left)'.format(
                    cluster_id,
                    self._pretty_time(now - start_time),
                    self._pretty_time(sleep_duration_s),
                    self._pretty_time(stop_time - now)))
            time.sleep(sleep_duration_s)
            now = time.time()
        logger.error('Giving up after {}'.format(self._pretty_time(60 * timeout_minutes)))
        return None

    def start(self, config, attempts = DEFAULT_ATTEMPTS):
        """Launch a cluster per the StartConfig, retrying up to 'attempts'
        times. Returns {'id', 'url', 'auth_token'}."""
        return self._retry(attempts, self._start, config, 'launch')

    def _start(self, config):
        """Single launch attempt: derive the CloudFormation template URL, POST
        the creation request, wait for RUNNING, fetch an auth token, and run
        optional post-launch configuration.

        Raises ClusterActionException on any failure.
        """
        is_17_cluster = config.ccm_channel in self._DCOS_17_CHANNELS
        template_url = None
        if is_17_cluster:
            hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos'
        elif config.cf_template.startswith('ee.'):
            hostrepo = 's3.amazonaws.com/downloads.mesosphere.io/dcos-enterprise-aws-advanced'
            # format is different for enterprise security modes.
            mode = config.security_mode
            if not mode:
                logger.warning("No templates known for enterprise & "
                               "default security (none). Cowardly bringing "
                               "up a permissive cluster")
                mode = 'permissive'
            template_url = 'https://{}/{}/{}/cloudformation/{}'.format(
                hostrepo, config.ccm_channel, mode, config.cf_template)
        else:
            hostrepo = 's3-us-west-2.amazonaws.com/downloads.dcos.io/dcos'
        # non-ee mode
        if not template_url:
            template_url = 'https://{}/{}/cloudformation/{}'.format(
                hostrepo, config.ccm_channel, config.cf_template)
        # external override from DCOS_TEMPLATE_URL
        if config.template_url:
            template_url = config.template_url
            logger.info("Accepting externally provided template_url from environment.")
        cluster_name = config.name_prefix + self._rand_str(8)
        payload = {
            'template_url': template_url,
            'name': cluster_name,
            'cluster_desc': config.description,
            'time': config.duration_mins,
            'private_agents': str(config.private_agents),
            'public_agents': str(config.public_agents),
            'pre_1_8_cluster': is_17_cluster,
            'adminlocation': config.admin_location,
            'cloud_provider': config.cloud_provider,
            'region': config.aws_region
        }
        logger.info('''Launching cluster:
  name={}
  agents={} private/{} public
  duration={} minutes
  mountvols={}
  permissions={}
  channel={}
  template={}
  template_url={}'''.format(
            cluster_name,
            config.private_agents, config.public_agents,
            config.duration_mins,
            config.mount_volumes,
            config.security_mode,
            config.ccm_channel,
            config.cf_template,
            template_url))
        response = self._query_http('POST', self._CCM_PATH, request_json_payload=payload)
        if not response:
            raise ClusterActionException('CCM cluster creation request failed')
        response_content = response.read().decode('utf-8')
        response_json = json.loads(response_content)
        logger.info('Launch response:\n{}'.format(pprint.pformat(response_json)))
        cluster_id = int(response_json.get('id', 0))
        if not cluster_id:
            raise ClusterActionException('No Cluster ID returned in cluster creation response: {}'.format(response_content))
        stack_id = response_json.get('stack_id', '')
        if not stack_id:
            raise ClusterActionException('No Stack ID returned in cluster creation response: {}'.format(response_content))
        cluster_info = self.wait_for_status(
            cluster_id,
            ['CREATING', 'RUNNING_NEEDS_INFO'], # pending states
            'RUNNING', # desired state
            config.start_timeout_mins)
        if not cluster_info:
            raise ClusterActionException('CCM cluster creation failed or timed out')
        dns_address = cluster_info.get('DnsAddress', '')
        if not dns_address:
            raise ClusterActionException('CCM cluster_info is missing DnsAddress: {}'.format(cluster_info))
        logger.info('Cluster is now RUNNING: {}'.format(cluster_info))
        # we fetch the token once up-front because on Open clusters it must be reused.
        # given that, we may as well use the same flow across both Open and EE.
        logger.info('Fetching auth token')
        dcos_url = 'https://' + dns_address
        auth_token = dcos_login.DCOSLogin(dcos_url).get_acs_token()
        if config.postlaunch_steps != 'none':
            is_enterprise = config.cf_template.startswith('ee.')
            clustinit = configure_test_cluster.ClusterInitializer(cluster_id,
                stack_id, auth_token, dns_address, is_enterprise,
                config.security_mode)
            initmaster = True
            if config.postlaunch_steps == 'nomaster':
                initmaster = False
            clustinit.apply_default_config(initmaster=initmaster)
            if config.mount_volumes:
                clustinit.create_mount_volumes()
        return {
            'id': cluster_id,
            'url': dcos_url,
            'auth_token': auth_token
        }

    def stop(self, config, attempts = DEFAULT_ATTEMPTS):
        """Delete the cluster in the StopConfig, retrying up to 'attempts'
        times, blocking until DELETED."""
        return self._retry(attempts, self._stop, config, 'shutdown')

    def trigger_stop(self, config):
        """Request cluster deletion without waiting for completion."""
        self._stop(config, False)

    def _stop(self, config, wait=True):
        """Single delete attempt; when wait is True, block until DELETED.
        Raises ClusterActionException on failure or timeout."""
        logger.info('Deleting cluster #{}'.format(config.cluster_id))
        response = self._query_http('DELETE', self._CCM_PATH + config.cluster_id + '/')
        if not response:
            raise ClusterActionException('CCM cluster deletion request failed')
        if wait:
            cluster_info = self.wait_for_status(
                config.cluster_id,
                ['DELETING'],
                'DELETED',
                config.stop_timeout_mins)
            if not cluster_info:
                raise ClusterActionException('CCM cluster deletion failed or timed out')
            logger.info(pprint.pformat(cluster_info))
        else:
            logger.info('Delete triggered, exiting.')
class StartConfig(object):
    """Parameters for launching a CCM cluster.

    Constructor defaults may be overridden through CCM_* environment
    variables; SECURITY and DCOS_TEMPLATE_URL are also honored.
    """

    def __init__(
            self,
            name_prefix = 'infinity-test-',
            description = '',
            duration_mins = 240,
            ccm_channel = 'testing/master',
            cf_template = 'ee.single-master.cloudformation.json',
            start_timeout_mins = CCMLauncher.DEFAULT_TIMEOUT_MINS,
            public_agents = 0,
            private_agents = 1,
            aws_region = 'us-west-2',
            admin_location = '0.0.0.0/0',
            cloud_provider = '0', # https://mesosphere.atlassian.net/browse/TEST-231
            mount_volumes = False,
            postlaunch_steps='default'):
        self.name_prefix = name_prefix
        self.duration_mins = int(os.environ.get('CCM_DURATION_MINS', duration_mins))
        self.ccm_channel = os.environ.get('CCM_CHANNEL', ccm_channel)
        self.cf_template = os.environ.get('CCM_TEMPLATE', cf_template)
        self.start_timeout_mins = int(os.environ.get('CCM_TIMEOUT_MINS', start_timeout_mins))
        self.public_agents = int(os.environ.get('CCM_PUBLIC_AGENTS', public_agents))
        self.private_agents = int(os.environ.get('CCM_AGENTS', private_agents))
        self.aws_region = os.environ.get('CCM_AWS_REGION', aws_region)
        self.admin_location = os.environ.get('CCM_ADMIN_LOCATION', admin_location)
        self.cloud_provider = os.environ.get('CCM_CLOUD_PROVIDER', cloud_provider)
        # NOTE(review): bool() of any non-empty string is True, so e.g.
        # CCM_MOUNT_VOLUMES=false would still enable mount volumes -- confirm
        # callers only ever set/unset this variable.
        self.mount_volumes = bool(os.environ.get('CCM_MOUNT_VOLUMES', mount_volumes))
        # SECURITY: 'strict', 'permissive', or 'default'/unset (-> None).
        self.security_mode = os.environ.get('SECURITY')
        if self.security_mode == 'default':
            self.security_mode = None
        if not self.security_mode in ('strict', 'permissive', None):
            raise Exception("Unknown value for SECURITY: %s" %
                            self.security_mode)
        # DCOS_TEMPLATE_URL, when set, overrides the channel/template-derived URL.
        self.template_url = os.environ.get('DCOS_TEMPLATE_URL', None)
        if not description:
            description = 'A test cluster with {} private/{} public agents'.format(
                self.private_agents, self.public_agents)
        self.description = description
        self.postlaunch_steps = postlaunch_steps
class StopConfig(object):
    """Configuration for stopping (deleting) a CCM cluster.

    cluster_id: the CCM cluster id to delete.
    stop_timeout_mins: minutes to wait for deletion; overridable via
        $CCM_TIMEOUT_MINS.
    """

    def __init__(
            self,
            cluster_id,
            stop_timeout_mins = CCMLauncher.DEFAULT_TIMEOUT_MINS):
        self.cluster_id = cluster_id
        # Fix: coerce to int -- os.environ.get returns a string, and
        # wait_for_status() computes `60 * timeout_minutes` with this value
        # (a string there would produce a repeated string, then a TypeError).
        # StartConfig already applies the same int() conversion.
        self.stop_timeout_mins = int(os.environ.get('CCM_TIMEOUT_MINS', stop_timeout_mins))
def _write_jenkins_config(github_label, cluster_info, error = None):
    """Write a Jenkins-consumable cluster-<label>.properties file into
    $WORKSPACE; a no-op when WORKSPACE is unset.

    cluster_info: dict with optional 'id', 'url', 'auth_token' keys.
    error: optional exception/message recorded as an ERROR= line.
    """
    if not 'WORKSPACE' in os.environ:
        return
    # write jenkins properties file to $WORKSPACE/cluster-$CCM_GITHUB_LABEL.properties:
    properties_path = os.path.join(os.environ['WORKSPACE'], 'cluster-{}.properties'.format(github_label))
    logger.info('Writing cluster properties to {}'.format(properties_path))
    # Context manager guarantees flush/close even if a write raises
    # (the original manual open/flush/close leaked the handle on error).
    with open(properties_path, 'w') as properties_file:
        properties_file.write('CLUSTER_ID={}\n'.format(cluster_info.get('id', '0')))
        properties_file.write('CLUSTER_URL={}\n'.format(cluster_info.get('url', '')))
        properties_file.write('CLUSTER_AUTH_TOKEN={}\n'.format(cluster_info.get('auth_token', '')))
        if error:
            properties_file.write('ERROR={}\n'.format(error))
def _determine_attempts():
    """Number of start/stop attempts: $CCM_ATTEMPTS, or the launcher default."""
    return int(os.environ.get('CCM_ATTEMPTS', CCMLauncher.DEFAULT_ATTEMPTS))
def determine_github_label():
    """GitHub status label: $CCM_GITHUB_LABEL if set, else $TEST_GITHUB_LABEL,
    else the literal 'ccm'."""
    return (os.environ.get('CCM_GITHUB_LABEL', '')
            or os.environ.get('TEST_GITHUB_LABEL', 'ccm'))
def start_cluster(ccm_token, launch_config=None):
    """One-stop shop to launch a cluster for external callers.

    ccm_token: CCM API auth token.
    launch_config: optional StartConfig; an env-driven default is built
        when omitted.
    Returns the cluster info dict ({'id', 'url', 'auth_token'}).
    """
    github_label = determine_github_label()
    launcher = CCMLauncher(ccm_token, github_label)
    attempts = _determine_attempts()
    if not launch_config:
        launch_config = StartConfig()
    return _start_cluster(launcher, github_label, attempts, launch_config)
def _start_cluster(launcher, github_label, start_stop_attempts, config):
    """Launch a cluster with retries, print its info as JSON to stdout, and
    record it in the Jenkins properties file.

    On failure, writes the error into the properties file and re-raises.
    """
    try:
        cluster_info = launcher.start(config, start_stop_attempts)
        # print to stdout (the rest of this script only writes to stderr):
        print(json.dumps(cluster_info))
        _write_jenkins_config(github_label, cluster_info)
    except Exception as e:
        _write_jenkins_config(github_label, {}, e)
        raise
    return cluster_info
def parse_args(argv):
    """Build the argparse CLI and parse argv[1:].

    Subcommands: start (also the default when no command is given), stop,
    trigger-stop, wait. --configure/--output are registered on both the
    top-level parser and the 'start' subparser so they work either way.
    """
    parser = argparse.ArgumentParser(prog='launch_ccm_cluster.py',
        description="create and manage cloud cluster manager clusters")
    parser.add_argument("--configure",
        choices=('default', 'nomaster', 'none'),
        default='default',
        help='What configuration steps to use to set up the cluster [%(default)s]')
    parser.add_argument("--output",
        metavar="filename",
        help='Write the cluster info to this filename, in addition to standard out')
    subparsers = parser.add_subparsers(
        help='An action other than the default start', dest='command')

    start_parser = subparsers.add_parser('start',
        help='launch a new cluster (this happens with no command as well)')
    start_parser.add_argument("--configure",
        choices=('default', 'nomaster', 'none'),
        default='default',
        help='What configuration steps to use to set up the cluster [%(default)s]')
    start_parser.add_argument("--output",
        metavar="filename",
        help='Write the cluster info to this filename, in addition to standard out')

    msg='ask CCM to stop a cluster and block until this completes'
    stop_parser = subparsers.add_parser('stop',
        help=msg, description=msg)
    stop_parser.add_argument('ccm_id', help='the cluster id to stop')

    msg='ask CCM to stop a cluster without blocking'
    trigstop_parser = subparsers.add_parser('trigger-stop',
        help=msg, description=msg)
    trigstop_parser.add_argument('ccm_id', help='the cluster id to stop')

    msg = 'wait for a CCM cluster to transition from one state to another'
    statuses = CCMLauncher._CCM_STATUSES.values()
    wait_parser = subparsers.add_parser('wait',
        help=msg,
        description=msg + "; valid states are ({})".format(", ".join(statuses)))
    wait_parser.add_argument('ccm_id', help='the cluster id to stop.')
    wait_parser.add_argument('current_state',
        choices=statuses,
        metavar='current_state',
        help='state to consider valid while waiting')
    wait_parser.add_argument('new_state',
        choices=statuses,
        metavar='new_state',
        help='state to wait for')
    return parser.parse_args(argv[1:])
def write_clustinfo(cluster_info, filename):
    """Serialize cluster_info as JSON and write it to filename."""
    with open(filename, "w") as output_f:
        output_f.write(json.dumps(cluster_info))
def main(argv):
    """CLI entry point: dispatch to stop / trigger-stop / wait / start.

    Requires $CCM_AUTH_TOKEN. Returns a process exit code (0 on success,
    1 when 'wait' fails).
    """
    ccm_token = os.environ.get('CCM_AUTH_TOKEN', '')
    if not ccm_token:
        raise Exception('CCM_AUTH_TOKEN is required')

    # used for status and for jenkins .properties file:
    github_label = determine_github_label()

    # error detection (and retry) for either a start or a stop operation:
    start_stop_attempts = int(os.environ.get('CCM_ATTEMPTS', CCMLauncher.DEFAULT_ATTEMPTS))

    launcher = CCMLauncher(ccm_token, github_label)
    args = parse_args(argv)
    if args.command == 'stop':
        launcher.stop(StopConfig(args.ccm_id), start_stop_attempts)
    elif args.command == 'trigger-stop':
        launcher.trigger_stop(StopConfig(args.ccm_id))
    elif args.command == 'wait':
        # piggy-back off of StopConfig's env handling:
        stop_config = StopConfig(args.ccm_id)
        cluster_info = launcher.wait_for_status(
            stop_config.cluster_id,
            [args.current_state],
            args.new_state,
            stop_config.stop_timeout_mins)
        if not cluster_info:
            return 1
        # print to stdout (the rest of this script only writes to stderr):
        print(pprint.pformat(cluster_info))
        if args.output:
            write_clustinfo(cluster_info, args.output)
    else: # 'start' or no command
        cluster_info = _start_cluster(launcher, github_label, start_stop_attempts,
            StartConfig(postlaunch_steps=args.configure))
        if args.output:
            write_clustinfo(cluster_info, args.output)
    return 0
# Script entry point: main()'s return value becomes the process exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| []
| []
| [
"CCM_TIMEOUT_MINS",
"CCM_GITHUB_LABEL",
"CCM_DURATION_MINS",
"CCM_ADMIN_LOCATION",
"SECURITY",
"TEST_GITHUB_LABEL",
"CCM_ATTEMPTS",
"CCM_AUTH_TOKEN",
"CCM_AWS_REGION",
"CCM_CLOUD_PROVIDER",
"CCM_MOUNT_VOLUMES",
"DRY_RUN",
"WORKSPACE",
"DCOS_TEMPLATE_URL",
"CCM_AGENTS",
"CCM_CHANNEL",
"CCM_TEMPLATE",
"CCM_PUBLIC_AGENTS"
]
| [] | ["CCM_TIMEOUT_MINS", "CCM_GITHUB_LABEL", "CCM_DURATION_MINS", "CCM_ADMIN_LOCATION", "SECURITY", "TEST_GITHUB_LABEL", "CCM_ATTEMPTS", "CCM_AUTH_TOKEN", "CCM_AWS_REGION", "CCM_CLOUD_PROVIDER", "CCM_MOUNT_VOLUMES", "DRY_RUN", "WORKSPACE", "DCOS_TEMPLATE_URL", "CCM_AGENTS", "CCM_CHANNEL", "CCM_TEMPLATE", "CCM_PUBLIC_AGENTS"] | python | 18 | 0 | |
command/command_test.go | // Package command provides system command execution.
package command
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"github.com/sensu/sensu-go/testing/testutil"
"github.com/stretchr/testify/assert"
)
// TestHelperProcess is not a real test: the test binary is re-invoked as a
// subprocess (presumably by FakeCommand -- confirm against that helper) and
// this function emulates the requested command. The GO_WANT_HELPER_PROCESS
// guard keeps it inert during a normal `go test` run.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}

	// os.Args[3:] carries the faked command line; os.Args[4:] its arguments.
	command := strings.Join(os.Args[3:], " ")
	stdin, _ := ioutil.ReadAll(os.Stdin)
	hasArgs := len(os.Args) > 4
	argStr := ""
	if hasArgs {
		argStr = strings.Join(os.Args[4:], " ")
	}

	switch command {
	case "cat":
		// echo stdin back verbatim
		fmt.Fprintf(os.Stdout, "%s", stdin)
	case "echo foo":
		fmt.Fprintln(os.Stdout, argStr)
	case "echo bar":
		// writes to stderr rather than stdout
		fmt.Fprintln(os.Stderr, argStr)
	case "false":
		os.Exit(1)
	case "sleep 10":
		time.Sleep(10 * time.Second)
	}

	os.Exit(0)
}
// TestExecuteCommand drives ExecuteCommand through the faked commands provided
// by TestHelperProcess, covering: stdout capture, stdin piping, non-zero exit
// codes, stderr capture, and the timeout path (status 2) for both a single
// command and a compound command line.
func TestExecuteCommand(t *testing.T) {
	// test that stdout can be read from
	echo := FakeCommand("echo", "foo")
	echoExec, echoErr := ExecuteCommand(context.Background(), echo)
	assert.Equal(t, nil, echoErr)
	assert.Equal(t, "foo\n", echoExec.Output)
	assert.Equal(t, 0, echoExec.Status)
	assert.NotEqual(t, 0, echoExec.Duration)

	// test that input can be passed to a command through stdin
	cat := FakeCommand("cat")
	cat.Input = "bar"
	catExec, catErr := ExecuteCommand(context.Background(), cat)
	assert.Equal(t, nil, catErr)
	assert.Equal(t, "bar", testutil.CleanOutput(catExec.Output))
	assert.Equal(t, 0, catExec.Status)
	assert.NotEqual(t, 0, catExec.Duration)

	// test that command exit codes can be read
	falseCmd := FakeCommand("false")
	falseExec, falseErr := ExecuteCommand(context.Background(), falseCmd)
	assert.Equal(t, nil, falseErr)
	assert.Equal(t, "", testutil.CleanOutput(falseExec.Output))
	assert.Equal(t, 1, falseExec.Status)
	assert.NotEqual(t, 0, falseExec.Duration)

	// test that stderr can be read from
	outputs := FakeCommand("echo bar 1>&2")
	outputsExec, outputsErr := ExecuteCommand(context.Background(), outputs)
	assert.Equal(t, nil, outputsErr)
	assert.Equal(t, "bar\n", testutil.CleanOutput(outputsExec.Output))
	assert.Equal(t, 0, outputsExec.Status)
	assert.NotEqual(t, 0, outputsExec.Duration)

	// test that commands can time out (status 2 signals a timeout)
	sleep := FakeCommand("sleep 10")
	sleep.Timeout = 1
	sleepExec, sleepErr := ExecuteCommand(context.Background(), sleep)
	assert.Equal(t, nil, sleepErr)
	assert.Equal(t, "Execution timed out\n", testutil.CleanOutput(sleepExec.Output))
	assert.Equal(t, 2, sleepExec.Status)
	assert.NotEqual(t, 0, sleepExec.Duration)

	// test that multiple commands can time out
	sleepMultiple := FakeCommand("sleep 10 && echo foo")
	sleepMultiple.Timeout = 1
	sleepMultipleExec, sleepMultipleErr := ExecuteCommand(context.Background(), sleepMultiple)
	assert.Equal(t, nil, sleepMultipleErr)
	assert.Equal(t, "Execution timed out\n", testutil.CleanOutput(sleepMultipleExec.Output))
	assert.Equal(t, 2, sleepMultipleExec.Status)
	assert.NotEqual(t, 0, sleepMultipleExec.Duration)
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
src/main/main.go | package main
import (
	"crypto/sha256"
	"encoding/hex"
	"io"
	"log"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/gorilla/mux"
	"github.com/joho/godotenv"
	"gopkg.in/gin-gonic/gin.v1/json"
)
// Block is one link in the chain: Index is its position (genesis = 0),
// Timestamp its creation time, BPM the recorded payload, Hash this block's
// SHA-256 digest, and PreHash the Hash of the preceding block.
type Block struct {
	Index int
	Timestamp string
	BPM int // beats-per-minute payload ("jump times" in the original notes)
	Hash string
	PreHash string
}
// the whole blockchain
// NOTE(review): this lowercase variable is read in replaceChain but never
// appended to; the handlers use the exported `Blockchain` instead. The two
// look like an accidental duplicate — confirm and consolidate.
var blockChain []Block
// calculateHash returns the lowercase hex-encoded SHA-256 digest of the
// block's Index, Timestamp, BPM and PreHash fields.
func calculateHash(block Block) string {
	// Fix: the original used string(block.Index)/string(block.BPM), which
	// converts an int to the Unicode code point it denotes (65 -> "A"), not
	// its decimal digits; strconv.Itoa yields the intended representation.
	record := strconv.Itoa(block.Index) + block.Timestamp + strconv.Itoa(block.BPM) + block.PreHash
	h := sha256.New()
	h.Write([]byte(record))
	hashed := h.Sum(nil)
	// encode hex
	return hex.EncodeToString(hashed)
}
// generateBlock builds the successor of oldBlock carrying the given BPM,
// stamping it with the current wall-clock time and a freshly computed hash.
func generateBlock(oldBlock Block, BPM int) (Block, error) {
	now := time.Now()
	next := Block{
		Index:     oldBlock.Index + 1,
		Timestamp: now.String(),
		BPM:       BPM,
		PreHash:   oldBlock.Hash,
	}
	next.Hash = calculateHash(next)
	return next, nil
}
// isBlockValid reports whether newBlock is a legitimate successor of
// oldBlock: the index must increment by exactly one, PreHash must equal the
// predecessor's Hash, and the stored Hash must match a recomputation.
func isBlockValid(newBlock Block, oldBlock Block) bool {
	switch {
	case newBlock.Index != oldBlock.Index+1:
		return false
	case newBlock.PreHash != oldBlock.Hash:
		return false
	case newBlock.Hash != calculateHash(newBlock):
		return false
	}
	return true
}
// replaceChain adopts newBlocks as the canonical chain when it is strictly
// longer than the current one (longest-chain rule).
func replaceChain(newBlocks []Block) {
	// Fix: the original body was empty (the chain was never replaced) and it
	// compared against the unused `blockChain` variable rather than the
	// `Blockchain` slice the HTTP handlers read and extend.
	if len(newBlocks) > len(Blockchain) {
		Blockchain = newBlocks
	}
}
// run starts the HTTP server on the port taken from the ADDR environment
// variable, with 10-second read/write timeouts and a 1 MiB header cap.
// It blocks until the server exits and returns the server error.
func run() error {
	mux := makeMuxRouter()
	httpAddr := os.Getenv("ADDR")
	log.Println("listening on", httpAddr)
	s := &http.Server{
		Addr:           ":" + httpAddr,
		Handler:        mux,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	// Fix: the original named this variable `error`, shadowing the predeclared
	// error type; use the conventional `err`.
	if err := s.ListenAndServe(); err != nil {
		return err
	}
	return nil
}
// makeMuxRouter wires the two endpoints: GET / returns the chain as JSON,
// POST / appends a new block from a Message body.
func makeMuxRouter() http.Handler {
	muxRouter := mux.NewRouter()
	// Fix: gorilla/mux matches methods case-sensitively against the request's
	// upper-case method, so the original "Get"/"post" strings never matched.
	muxRouter.HandleFunc("/", handleGetBlockchain).Methods("GET")
	muxRouter.HandleFunc("/", handleWriteBlock).Methods("POST")
	return muxRouter
}
// handleGetBlockchain writes the whole chain as indented JSON; marshalling
// failures produce a 500 with the error text.
func handleGetBlockchain(w http.ResponseWriter, r *http.Request) {
	bytes, err := json.MarshalIndent(Blockchain, "", "")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// Fix: without this return the handler fell through and also wrote the
		// (empty) payload after the 500 response.
		return
	}
	io.WriteString(w, string(bytes))
}
// Message is the JSON request body accepted by handleWriteBlock; the client
// supplies only the BPM payload for the block to be minted.
type Message struct {
	BPM int
}
// handleWriteBlock decodes a Message from the request body, mints the next
// block from the current chain tip, validates it, and on success swaps in the
// extended chain. Responds 201 with the new block, 400 on undecodable JSON,
// 500 if block generation errors.
func handleWriteBlock(w http.ResponseWriter, r *http.Request) {
	var m Message

	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&m); err != nil {
		// NOTE(review): the payload passed here is r.Body (a stream), which
		// does not marshal into anything useful for the client — confirm intent.
		respondWithJSON(w, r, http.StatusBadRequest, r.Body)
		return
	}
	defer r.Body.Close()

	// NOTE(review): indexes Blockchain[len-1]; this panics if the genesis
	// block has not been appended yet (main seeds it asynchronously).
	newBlock, err := generateBlock(Blockchain[len(Blockchain)-1], m.BPM)
	if err != nil {
		respondWithJSON(w, r, http.StatusInternalServerError, m)
		return
	}
	if isBlockValid(newBlock, Blockchain[len(Blockchain)-1]) {
		newBlockchain := append(Blockchain, newBlock)
		replaceChain(newBlockchain)
		spew.Dump(Blockchain) // debug: dump the chain to stdout
	}

	respondWithJSON(w, r, http.StatusCreated, newBlock)
}
// respondWithJSON serializes payload as indented JSON and writes it with the
// given status code; a marshalling failure yields a plain-text 500 instead.
func respondWithJSON(w http.ResponseWriter, r *http.Request, code int, payload interface{}) {
	body, marshalErr := json.MarshalIndent(payload, "", " ")
	if marshalErr != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("HTTP 500: Internal Server Error"))
		return
	}
	w.WriteHeader(code)
	w.Write(body)
}
// Blockchain is the in-memory canonical chain served and mutated by the HTTP
// handlers. NOTE(review): access is not synchronized across handler goroutines.
var Blockchain []Block
func main() {
err:=godotenv.Load()
if err!=nil {
log.Fatal(err)
}
go func() {
t:=time.Now()
genesisBlock:=Block{0,t.String(),0,"",""}
spew.Dump(genesisBlock)
Blockchain=append(Blockchain,genesisBlock)
}()
log.Fatal(run())
} | [
"\"ADDR\"",
"\"ADDR\""
]
| []
| [
"ADDR"
]
| [] | ["ADDR"] | go | 1 | 0 | |
ms_translator.py | # -*- coding: utf-8 -*-
import os, requests, uuid, json
# Read the Translator Text subscription key from the environment; exit early
# with a clear message when it is missing so later API calls do not fail with
# an opaque authentication error.
if 'TRANSLATOR_TEXT_KEY' in os.environ:
    subscriptionKey = os.environ['TRANSLATOR_TEXT_KEY']
else:
    print('Environment variable for TRANSLATOR_TEXT_KEY is not set.')
    exit()
# If you want to set your subscription key as a string, uncomment the line
# below and add your subscription key.
# subscriptionKey = 'nope'
# Fix: the line above was live (not commented out), so it unconditionally
# clobbered the key read from the environment and made the env-var branch —
# including its error message — dead code.

# Microsoft Translator v3 endpoint, translating to English.
base_url = 'https://api.cognitive.microsofttranslator.com'
path = '/translate?api-version=3.0'
params = '&to=en'
constructed_url = base_url + path + params
def get_trans(text):
    """Translate ``text`` to English via the Microsoft Translator v3 API.

    Posts a single-item request to the module-level ``constructed_url`` using
    the module-level ``subscriptionKey`` and returns the translated string.
    """
    request_headers = {
        'Ocp-Apim-Subscription-Key': subscriptionKey,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4()),
    }
    payload = [{'text': text}]
    api_response = requests.post(
        constructed_url, headers=request_headers, json=payload
    ).json()
    # The API returns one result per input item; take the first translation.
    return api_response[0]['translations'][0]['text']
| []
| []
| [
"TRANSLATOR_TEXT_KEY"
]
| [] | ["TRANSLATOR_TEXT_KEY"] | python | 1 | 0 | |
db/workflow.go | package db
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/google/uuid"
"github.com/pkg/errors"
pb "github.com/tinkerbell/tink/protos/workflow"
wflow "github.com/tinkerbell/tink/workflow"
)
// Workflow represents a workflow instance in database: ID is the workflow
// UUID, Template the template reference, and Hardware maps to the `devices`
// column. NOTE(review): State is not populated by the queries in this file —
// confirm where it is set before relying on it.
type Workflow struct {
	State int32
	ID, Hardware, Template string
	CreatedAt, UpdatedAt *timestamp.Timestamp
}
var (
	// defaultMaxVersions is used unless overridden via the
	// MAX_WORKFLOW_DATA_VERSIONS environment variable (see init below).
	defaultMaxVersions = 3
	maxVersions        = defaultMaxVersions // maximum number of workflow data versions to be kept in database
)
// CreateWorkflow creates a new workflow: the action list derived from the
// definition in data and the workflow row itself are written in one
// serializable transaction keyed by id.
func (d TinkDB) CreateWorkflow(ctx context.Context, wf Workflow, data string, id uuid.UUID) error {
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously an early
	// return leaked the open transaction (and its locks). Rollback after a
	// successful Commit is a harmless no-op (sql.ErrTxDone).
	defer tx.Rollback()

	err = insertActionList(ctx, d.instance, data, id, tx)
	if err != nil {
		return errors.Wrap(err, "failed to create workflow")
	}
	err = insertInWorkflow(ctx, d.instance, wf, tx)
	if err != nil {
		return errors.Wrap(err, "failed to create workflow")
	}
	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// insertInWorkflow upserts the workflow row inside the caller's transaction;
// an existing id is resurrected (deleted_at reset to NULL) and its template
// and devices refreshed. ctx and db are currently unused — the exec runs on tx.
func insertInWorkflow(ctx context.Context, db *sql.DB, wf Workflow, tx *sql.Tx) error {
	_, err := tx.Exec(`
	INSERT INTO
		workflow (created_at, updated_at, template, devices, id)
	VALUES
		($1, $1, $2, $3, $4)
	ON CONFLICT (id)
	DO
	UPDATE SET
		(updated_at, deleted_at, template, devices) = ($1, NULL, $2, $3);
	`, time.Now(), wf.Template, wf.Hardware, wf.ID)
	if err != nil {
		return errors.Wrap(err, "INSERT in to workflow")
	}
	return nil
}
// insertIntoWfWorkerTable records, inside the caller's transaction, that
// workerID participates in workflow wfID. ctx and db are currently unused.
// NOTE(review): the ON CONFLICT clause is commented out below, so duplicate
// pairs are not deduplicated at the database level.
func insertIntoWfWorkerTable(ctx context.Context, db *sql.DB, wfID uuid.UUID, workerID uuid.UUID, tx *sql.Tx) error {
	_, err := tx.Exec(`
	INSERT INTO
		workflow_worker_map (workflow_id, worker_id)
	VALUES
		($1, $2);
	`, wfID, workerID)
	//ON CONFLICT (workflow_id, worker_id)
	//DO NOTHING;
	//	`, wfID, workerID)
	if err != nil {
		return errors.Wrap(err, "INSERT in to workflow_worker_map")
	}
	return nil
}
// insertActionList parses the YAML workflow definition, flattens every task's
// actions into a single pb.WorkflowAction list (merging task-level environment
// and volumes into each action, with action-level entries taking precedence),
// records the workflow→worker mapping, and seeds the workflow_state row — all
// inside the caller's transaction.
func insertActionList(ctx context.Context, db *sql.DB, yamlData string, id uuid.UUID, tx *sql.Tx) error {
	wf, err := wflow.Parse([]byte(yamlData))
	if err != nil {
		return err
	}
	var actionList []*pb.WorkflowAction
	var uniqueWorkerID uuid.UUID
	for _, task := range wf.Tasks {
		taskEnvs := map[string]string{}
		taskVolumes := map[string]string{}
		// volume specs are "src:dst[:opts]"; key by source path so an
		// action-level entry can override the task-level mount for that source
		for _, vol := range task.Volumes {
			v := strings.Split(vol, ":")
			taskVolumes[v[0]] = strings.Join(v[1:], ":")
		}
		for key, val := range task.Environment {
			taskEnvs[key] = val
		}

		// resolve the task's worker address (MAC or IP) to a hardware id
		workerID, err := getWorkerID(ctx, db, task.WorkerAddr)
		if err != nil {
			return errors.WithMessage(err, "unable to insert into action list")
		}
		workerUID, err := uuid.Parse(workerID)
		if err != nil {
			return err
		}
		// NOTE(review): this only skips *consecutive* repeats of the same
		// worker; a worker alternating between tasks is inserted again, and
		// the map table's conflict handling is commented out — confirm.
		if uniqueWorkerID != workerUID {
			err = insertIntoWfWorkerTable(ctx, db, id, workerUID, tx)
			if err != nil {
				return err
			}
			uniqueWorkerID = workerUID
		}
		for _, ac := range task.Actions {
			// merge environments: task-level first, then action-level overrides
			acenvs := map[string]string{}
			for key, val := range taskEnvs {
				acenvs[key] = val
			}
			for key, val := range ac.Environment {
				acenvs[key] = val
			}

			envs := []string{}
			for key, val := range acenvs {
				envs = append(envs, key+"="+val)
			}

			// merge volumes the same way, then rebuild the "src:dst" strings
			volumes := map[string]string{}
			for k, v := range taskVolumes {
				volumes[k] = v
			}

			for _, vol := range ac.Volumes {
				v := strings.Split(vol, ":")
				volumes[v[0]] = strings.Join(v[1:], ":")
			}

			ac.Volumes = []string{}
			for k, v := range volumes {
				ac.Volumes = append(ac.Volumes, k+":"+v)
			}

			action := pb.WorkflowAction{
				TaskName:    task.Name,
				WorkerId:    workerUID.String(),
				Name:        ac.Name,
				Image:       ac.Image,
				Timeout:     ac.Timeout,
				Command:     ac.Command,
				OnTimeout:   ac.OnTimeout,
				OnFailure:   ac.OnFailure,
				Environment: envs,
				Volumes:     ac.Volumes,
			}
			actionList = append(actionList, &action)
		}
	}
	totalActions := int64(len(actionList))
	actionData, err := json.Marshal(actionList)
	if err != nil {
		return err
	}
	// seed workflow_state with the serialized action list and zeroed progress
	_, err = tx.Exec(`
	INSERT INTO
		workflow_state (workflow_id, current_worker, current_task_name, current_action_name, current_action_state, action_list, current_action_index, total_number_of_actions)
	VALUES
		($1, $2, $3, $4, $5, $6, $7, $8)
	ON CONFLICT (workflow_id)
	DO
	UPDATE SET
		(workflow_id, current_worker, current_task_name, current_action_name, current_action_state, action_list, current_action_index, total_number_of_actions) = ($1, $2, $3, $4, $5, $6, $7, $8);
	`, id, "", "", "", 0, actionData, 0, totalActions)
	if err != nil {
		return errors.Wrap(err, "INSERT in to workflow_state")
	}
	return nil
}
// InsertIntoWfDataTable : Insert ephemeral data in workflow_data table.
// Each call writes the next version number for the workflow; once more than
// maxVersions versions exist, the data column of the oldest surviving version
// is nulled out to bound storage.
func (d TinkDB) InsertIntoWfDataTable(ctx context.Context, req *pb.UpdateWorkflowDataRequest) error {
	version, err := getLatestVersionWfData(ctx, d.instance, req.GetWorkflowId())
	if err != nil {
		return err
	}

	//increment version
	version = version + 1
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously a failed
	// INSERT/UPDATE returned with the transaction still open.
	defer tx.Rollback()

	_, err = tx.Exec(`
	INSERT INTO
		workflow_data (workflow_id, version, metadata, data)
	VALUES
		($1, $2, $3, $4);
	`, req.GetWorkflowId(), version, string(req.GetMetadata()), string(req.GetData()))
	if err != nil {
		return errors.Wrap(err, "INSERT Into workflow_data")
	}

	if version > int32(maxVersions) {
		// drop the payload of the version that just fell out of the window
		cleanVersion := version - int32(maxVersions)
		_, err = tx.Exec(`
		UPDATE workflow_data
		SET
			data = NULL
		WHERE
			workflow_id = $1 AND version = $2;
		`, req.GetWorkflowId(), cleanVersion)
		if err != nil {
			return errors.Wrap(err, "UPDATE")
		}
	}

	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// GetfromWfDataTable : Give you the ephemeral data from workflow_data table.
// A zero req.Version selects the latest version. NOTE(review): scan failures
// (including "no rows") are logged but reported to the caller as empty data
// with a nil error — confirm callers rely on this best-effort contract before
// tightening it.
func (d TinkDB) GetfromWfDataTable(ctx context.Context, req *pb.GetWorkflowDataRequest) ([]byte, error) {
	version := req.GetVersion()
	if req.Version == 0 {
		v, err := getLatestVersionWfData(ctx, d.instance, req.GetWorkflowId())
		if err != nil {
			return []byte(""), err
		}
		version = v
	}
	query := `
	SELECT data
	FROM workflow_data
	WHERE
		workflow_id = $1 AND version = $2
	`
	row := d.instance.QueryRowContext(ctx, query, req.GetWorkflowId(), version)
	buf := []byte{}
	err := row.Scan(&buf)
	if err == nil {
		return []byte(buf), nil
	}

	if err != sql.ErrNoRows {
		err = errors.Wrap(err, "SELECT")
		d.logger.Error(err)
	}
	return []byte{}, nil
}
// GetWorkflowMetadata returns metadata wrt to the ephemeral data of a workflow.
// A zero req.Version selects the latest version. NOTE(review): like
// GetfromWfDataTable, scan failures are logged and surfaced as empty bytes
// with a nil error — confirm before changing.
func (d TinkDB) GetWorkflowMetadata(ctx context.Context, req *pb.GetWorkflowDataRequest) ([]byte, error) {
	version := req.GetVersion()
	if req.Version == 0 {
		v, err := getLatestVersionWfData(ctx, d.instance, req.GetWorkflowId())
		if err != nil {
			return []byte(""), err
		}
		version = v
	}
	query := `
	SELECT metadata
	FROM workflow_data
	WHERE
		workflow_id = $1 AND version = $2
	`
	row := d.instance.QueryRowContext(ctx, query, req.GetWorkflowId(), version)
	buf := []byte{}
	err := row.Scan(&buf)
	if err == nil {
		return []byte(buf), nil
	}

	if err != sql.ErrNoRows {
		err = errors.Wrap(err, "SELECT from workflow_data")
		d.logger.Error(err)
	}
	return []byte{}, nil
}
// GetWorkflowDataVersion returns the latest version of data for a workflow
// (zero when no data rows exist yet; versions are counted, see
// getLatestVersionWfData).
func (d TinkDB) GetWorkflowDataVersion(ctx context.Context, workflowID string) (int32, error) {
	return getLatestVersionWfData(ctx, d.instance, workflowID)
}
// GetWorkflowsForWorker : returns the list of workflow ids mapped to the
// worker with the given id (nil when the worker has none).
func (d TinkDB) GetWorkflowsForWorker(id string) ([]string, error) {
	rows, err := d.instance.Query(`
	SELECT workflow_id
	FROM workflow_worker_map
	WHERE
		worker_id = $1;
	`, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var wfIDs []string
	// Fix: the scanned column is workflow_id; the local was misleadingly named
	// workerID in the original.
	var workflowID string
	for rows.Next() {
		err = rows.Scan(&workflowID)
		if err != nil {
			err = errors.Wrap(err, "SELECT from worflow_worker_map")
			d.logger.Error(err)
			return nil, err
		}
		wfIDs = append(wfIDs, workflowID)
	}
	err = rows.Err()
	if err == sql.ErrNoRows {
		return nil, nil
	}
	return wfIDs, err
}
// GetWorkflow returns a workflow by id (soft-deleted rows are excluded).
// NOTE(review): both "not found" and scan errors yield a zero Workflow with a
// nil error (errors are only logged) — confirm callers expect this.
func (d TinkDB) GetWorkflow(ctx context.Context, id string) (Workflow, error) {
	query := `
	SELECT template, devices
	FROM workflow
	WHERE
		id = $1
	AND
		deleted_at IS NULL;
	`
	row := d.instance.QueryRowContext(ctx, query, id)
	var tmp, tar string
	err := row.Scan(&tmp, &tar)
	if err == nil {
		return Workflow{ID: id, Template: tmp, Hardware: tar}, nil
	}

	if err != sql.ErrNoRows {
		err = errors.Wrap(err, "SELECT")
		d.logger.Error(err)
	}

	return Workflow{}, nil
}
// DeleteWorkflow deletes a workflow: its worker-map and state rows are removed
// and the workflow row itself is soft-deleted (deleted_at = NOW()), all in one
// serializable transaction. The state parameter is currently unused but kept
// for interface compatibility.
func (d TinkDB) DeleteWorkflow(ctx context.Context, id string, state int32) error {
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously a failed
	// statement returned with the transaction still open.
	defer tx.Rollback()

	_, err = tx.Exec(`
	DELETE FROM workflow_worker_map
	WHERE
		workflow_id = $1;
	`, id)
	if err != nil {
		return errors.Wrap(err, "Delete Workflow Error")
	}

	_, err = tx.Exec(`
	DELETE FROM workflow_state
	WHERE
		workflow_id = $1;
	`, id)
	if err != nil {
		return errors.Wrap(err, "Delete Workflow Error")
	}

	_, err = tx.Exec(`
	UPDATE workflow
	SET
		deleted_at = NOW()
	WHERE
		id = $1;
	`, id)
	if err != nil {
		return errors.Wrap(err, "UPDATE")
	}

	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// ListWorkflows returns all workflows that are not soft-deleted, invoking fn
// once per row; iteration stops at the first error returned by fn.
func (d TinkDB) ListWorkflows(fn func(wf Workflow) error) error {
	rows, err := d.instance.Query(`
	SELECT id, template, devices, created_at, updated_at
	FROM workflow
	WHERE
		deleted_at IS NULL;
	`)

	if err != nil {
		return err
	}

	defer rows.Close()
	var (
		id, tmp, tar string
		crAt, upAt   time.Time
	)

	for rows.Next() {
		err = rows.Scan(&id, &tmp, &tar, &crAt, &upAt)
		if err != nil {
			err = errors.Wrap(err, "SELECT")
			d.logger.Error(err)
			return err
		}

		wf := Workflow{
			ID:       id,
			Template: tmp,
			Hardware: tar,
		}
		// timestamp conversion errors are deliberately ignored here
		wf.CreatedAt, _ = ptypes.TimestampProto(crAt)
		wf.UpdatedAt, _ = ptypes.TimestampProto(upAt)
		err = fn(wf)
		if err != nil {
			return err
		}
	}
	err = rows.Err()
	if err == sql.ErrNoRows {
		err = nil
	}
	return err
}
// UpdateWorkflow updates a given workflow. Only the non-empty of wf.Template
// and wf.Hardware are written (both when both are set); updated_at is bumped
// in all cases. The state parameter is currently unused but kept for
// interface compatibility.
func (d TinkDB) UpdateWorkflow(ctx context.Context, wf Workflow, state int32) error {
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously a failed
	// UPDATE returned with the transaction still open.
	defer tx.Rollback()

	if wf.Hardware == "" && wf.Template != "" {
		_, err = tx.Exec(`
		UPDATE workflow
		SET
			updated_at = NOW(), template = $2
		WHERE
			id = $1;
		`, wf.ID, wf.Template)
	} else if wf.Hardware != "" && wf.Template == "" {
		_, err = tx.Exec(`
		UPDATE workflow
		SET
			updated_at = NOW(), devices = $2
		WHERE
			id = $1;
		`, wf.ID, wf.Hardware)
	} else {
		_, err = tx.Exec(`
		UPDATE workflow
		SET
			updated_at = NOW(), template = $2, devices = $3
		WHERE
			id = $1;
		`, wf.ID, wf.Template, wf.Hardware)
	}

	if err != nil {
		return errors.Wrap(err, "UPDATE")
	}

	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// UpdateWorkflowState : update the current workflow state row (task, action,
// action state, worker and action index) for wfContext.WorkflowId.
func (d TinkDB) UpdateWorkflowState(ctx context.Context, wfContext *pb.WorkflowContext) error {
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously a failed
	// UPDATE returned with the transaction still open.
	defer tx.Rollback()

	_, err = tx.Exec(`
	UPDATE workflow_state
	SET current_task_name = $2,
		current_action_name = $3,
		current_action_state = $4,
		current_worker = $5,
		current_action_index = $6
	WHERE
		workflow_id = $1;
	`, wfContext.WorkflowId, wfContext.CurrentTask, wfContext.CurrentAction, wfContext.CurrentActionState, wfContext.CurrentWorker, wfContext.CurrentActionIndex)
	if err != nil {
		// Fix: the statement above is an UPDATE; the original wrap message
		// incorrectly said "INSERT in to workflow_state".
		return errors.Wrap(err, "UPDATE workflow_state")
	}
	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// GetWorkflowContexts : gives you the current workflow context for wfID.
// NOTE(review): both "no rows" and scan errors yield an empty context with a
// nil error (errors are only logged) — confirm callers expect this.
func (d TinkDB) GetWorkflowContexts(ctx context.Context, wfID string) (*pb.WorkflowContext, error) {
	query := `
	SELECT current_worker, current_task_name, current_action_name, current_action_index, current_action_state, total_number_of_actions
	FROM workflow_state
	WHERE
		workflow_id = $1;
	`
	row := d.instance.QueryRowContext(ctx, query, wfID)
	var cw, ct, ca string
	var cai, tact int64
	var cas pb.State
	err := row.Scan(&cw, &ct, &ca, &cai, &cas, &tact)
	if err == nil {
		return &pb.WorkflowContext{
			WorkflowId:           wfID,
			CurrentWorker:        cw,
			CurrentTask:          ct,
			CurrentAction:        ca,
			CurrentActionIndex:   cai,
			CurrentActionState:   cas,
			TotalNumberOfActions: tact}, nil
	}

	if err != sql.ErrNoRows {
		err = errors.Wrap(err, "SELECT from worflow_state")
		d.logger.Error(err)
	}
	return &pb.WorkflowContext{}, nil
}
// GetWorkflowActions : gives you the action list of workflow wfID, decoded
// from the JSON action_list column of workflow_state. NOTE(review): lookup
// and scan errors yield an empty list with a nil error (only logged) —
// JSON decode errors, in contrast, are returned.
func (d TinkDB) GetWorkflowActions(ctx context.Context, wfID string) (*pb.WorkflowActionList, error) {
	query := `
	SELECT action_list
	FROM workflow_state
	WHERE
		workflow_id = $1;
	`
	row := d.instance.QueryRowContext(ctx, query, wfID)
	var actionList string
	err := row.Scan(&actionList)
	if err == nil {
		actions := []*pb.WorkflowAction{}
		if err := json.Unmarshal([]byte(actionList), &actions); err != nil {
			return nil, err
		}
		return &pb.WorkflowActionList{
			ActionList: actions}, nil
	}
	if err != sql.ErrNoRows {
		err = errors.Wrap(err, "SELECT from worflow_state")
		d.logger.Error(err)
	}
	return &pb.WorkflowActionList{}, nil
}
// InsertIntoWorkflowEventTable : insert one action-status event row with the
// supplied creation time.
func (d TinkDB) InsertIntoWorkflowEventTable(ctx context.Context, wfEvent *pb.WorkflowActionStatus, time time.Time) error {
	tx, err := d.instance.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return errors.Wrap(err, "BEGIN transaction")
	}
	// Fix: release the transaction on every error path; previously a failed
	// INSERT returned with the transaction still open.
	defer tx.Rollback()

	// TODO "created_at" field should be set in worker and come in the request
	_, err = tx.Exec(`
	INSERT INTO
		workflow_event (workflow_id, worker_id, task_name, action_name, execution_time, message, status, created_at)
	VALUES
		($1, $2, $3, $4, $5, $6, $7, $8);
	`, wfEvent.WorkflowId, wfEvent.WorkerId, wfEvent.TaskName, wfEvent.ActionName, wfEvent.Seconds, wfEvent.Message, wfEvent.ActionStatus, time)
	if err != nil {
		return errors.Wrap(err, "INSERT in to workflow_event")
	}
	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "COMMIT")
	}
	return nil
}
// ShowWorkflowEvents returns all events recorded for workflow wfID in
// chronological order, invoking fn once per event; iteration stops at the
// first error returned by fn.
func (d TinkDB) ShowWorkflowEvents(wfID string, fn func(wfs *pb.WorkflowActionStatus) error) error {
	rows, err := d.instance.Query(`
	SELECT worker_id, task_name, action_name, execution_time, message, status, created_at
	FROM workflow_event
	WHERE
		workflow_id = $1
	ORDER BY
		created_at ASC;
	`, wfID)

	if err != nil {
		return err
	}

	defer rows.Close()
	var (
		status                int32
		secs                  int64
		id, tName, aName, msg string
		evTime                time.Time
	)

	for rows.Next() {
		err = rows.Scan(&id, &tName, &aName, &secs, &msg, &status, &evTime)
		if err != nil {
			err = errors.Wrap(err, "SELECT")
			d.logger.Error(err)
			return err
		}
		// timestamp conversion errors are deliberately ignored here
		createdAt, _ := ptypes.TimestampProto(evTime)
		wfs := &pb.WorkflowActionStatus{
			WorkerId:     id,
			TaskName:     tName,
			ActionName:   aName,
			Seconds:      secs,
			Message:      msg,
			ActionStatus: pb.State(status),
			CreatedAt:    createdAt,
		}
		err = fn(wfs)
		if err != nil {
			return err
		}
	}
	err = rows.Err()
	if err == sql.ErrNoRows {
		err = nil
	}
	return err
}
// getLatestVersionWfData reports how many data versions exist for wfID, which
// doubles as the highest version number since versions are assigned 1, 2, ...
// A database error yields -1.
func getLatestVersionWfData(ctx context.Context, db *sql.DB, wfID string) (int32, error) {
	const query = `
	SELECT COUNT(*)
	FROM workflow_data
	WHERE
		workflow_id = $1;
	`
	var latest int32
	if err := db.QueryRowContext(ctx, query, wfID).Scan(&latest); err != nil {
		return -1, err
	}
	return latest, nil
}
// getWorkerIDbyMac resolves a MAC address to the id of the (non-deleted)
// hardware row whose JSON data contains that MAC under
// network.interfaces[].dhcp.mac, using a JSONB containment query.
// NOTE(review): mac is interpolated directly into the JSON containment
// argument; callers validate it with net.ParseMAC first (see getWorkerID),
// but confirm no other call sites pass unvalidated input.
func getWorkerIDbyMac(ctx context.Context, db *sql.DB, mac string) (string, error) {
	arg := `
	{
	  "network": {
	    "interfaces": [
	      {
		"dhcp": {
		  "mac": "` + mac + `"
		}
	      }
	    ]
	  }
	}
	`
	query := `
	SELECT id
	FROM hardware
	WHERE
		deleted_at IS NULL
	AND
		data @> $1
	`

	id, err := get(ctx, db, query, arg)
	if errors.Cause(err) == sql.ErrNoRows {
		err = errors.WithMessage(errors.New(mac), "mac")
	}
	return id, err
}
// getWorkerIDbyIP resolves an IP address to the id of the (non-deleted)
// hardware row whose JSON data contains the address either under
// instance.ip_addresses[].address or network.interfaces[].dhcp.ip.address.
// NOTE(review): ip is interpolated directly into the JSON containment
// arguments; callers validate it with net.ParseIP first (see getWorkerID).
func getWorkerIDbyIP(ctx context.Context, db *sql.DB, ip string) (string, error) {
	// update for instance (under metadata)
	instance := `
	{
	  "instance": {
	    "ip_addresses": [
	      {
		"address": "` + ip + `"
	      }
	    ]
	  }
	}
	`
	hardwareOrManagement := `
	{
		"network": {
			"interfaces": [
				{
					"dhcp": {
						"ip": {
							"address": "` + ip + `"
						}
					}
				}
			]
		}
	}
	`
	query := `
	SELECT id
	FROM hardware
	WHERE
		deleted_at IS NULL
	AND (
		data @> $1
		OR
		data @> $2
	)
	`

	id, err := get(ctx, db, query, instance, hardwareOrManagement)
	if errors.Cause(err) == sql.ErrNoRows {
		err = errors.WithMessage(errors.New(ip), "ip")
	}
	return id, err
}
func getWorkerID(ctx context.Context, db *sql.DB, addr string) (string, error) {
_, err := net.ParseMAC(addr)
if err != nil {
ip := net.ParseIP(addr)
if ip == nil || ip.To4() == nil {
return "", fmt.Errorf("invalid worker address: %s", addr)
}
id, err := getWorkerIDbyIP(ctx, db, addr)
return id, errors.WithMessage(err, "no worker found")
}
id, err := getWorkerIDbyMac(ctx, db, addr)
return id, errors.WithMessage(err, "no worker found")
}
// init allows overriding the number of retained workflow data versions via
// the MAX_WORKFLOW_DATA_VERSIONS environment variable.
func init() {
	val := os.Getenv("MAX_WORKFLOW_DATA_VERSIONS")
	// Fix: reject non-positive overrides; with maxVersions <= 0,
	// InsertIntoWfDataTable would null out the data of versions it just wrote.
	if v, err := strconv.Atoi(val); err == nil && v > 0 {
		maxVersions = v
	}
}
| [
"\"MAX_WORKFLOW_DATA_VERSIONS\""
]
| []
| [
"MAX_WORKFLOW_DATA_VERSIONS"
]
| [] | ["MAX_WORKFLOW_DATA_VERSIONS"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"net"
"net/http"
"net/http/fcgi"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/brandfolder/gin-gorelic"
"github.com/coreos/go-systemd/activation"
"github.com/gin-gonic/gin"
)
// Logger is a simple log handler, out puts in the standard of apache access log common,
// with an additional latency field.
// See http://httpd.apache.org/docs/2.2/logs.html#accesslog
func Logger() gin.HandlerFunc {
	return func(c *gin.Context) {
		t := time.Now()

		ip, err := net.ResolveTCPAddr("tcp", c.Request.RemoteAddr)
		if err != nil {
			// Fix: the original only called c.Abort() and then fell through,
			// dereferencing the nil ip in the Printf below. Abort and bail out.
			c.Abort()
			return
		}

		// before request
		c.Next()
		// after request

		user := "-"
		if c.Request.URL.User != nil {
			user = c.Request.URL.User.Username()
		}

		latency := time.Since(t)

		// This is the format of Apache Log Common, with an additional field of latency
		fmt.Printf("%v - %v [%v] \"%v %v %v\" %v %v %v\n",
			ip.IP, user, t.Format(time.RFC3339), c.Request.Method, c.Request.URL.Path,
			c.Request.Proto, c.Writer.Status(), c.Request.ContentLength, latency)
	}
}
// stringInSlice reports whether a occurs in list.
func stringInSlice(a string, list []string) bool {
	for _, candidate := range list {
		if candidate == a {
			return true
		}
	}
	return false
}
// testRemoteTCPPort reports whether a TCP connection to address can be
// established within three seconds.
func testRemoteTCPPort(address string) bool {
	conn, err := net.DialTimeout("tcp", address, 3*time.Second)
	if err != nil {
		return false
	}
	// Fix: close the probe connection; the original leaked one socket per
	// successful check.
	conn.Close()
	return true
}
// mainHandler serves "/" and "/:field". The requested field name is split on
// "." (a ".json" suffix selects JSON output where supported). Special fields:
// "porttest.N" probes TCP port N on the caller's IP, "request" dumps the raw
// request, "all" dumps every collected key, and an empty field returns just
// the IP for curl/Wget/fetch or the HTML home page otherwise.
func mainHandler(c *gin.Context) {
	fields := strings.Split(c.Params.ByName("field"), ".")
	ip, err := net.ResolveTCPAddr("tcp", c.Request.RemoteAddr)
	if err != nil {
		// Fix: the original only called c.Abort() and then fell through,
		// dereferencing the nil ip below. Abort and bail out.
		c.Abort()
		return
	}

	//proxy handling stuff
	// use CF-Connecting-IP header as ip if available (this means app is invoked behind a proxy)
	cfIP := net.ParseIP(c.Request.Header.Get("CF-Connecting-IP"))
	if cfIP != nil {
		ip.IP = cfIP
	}
	// use CF-Connecting-PORT header as source port if available (this means app is invoked behind a proxy)
	cfPORT := c.Request.Header.Get("CF-Connecting-PORT")
	if cfPORTnum, err := strconv.Atoi(cfPORT); err == nil {
		ip.Port = cfPORTnum
	}
	// Use CF-Connection header instead of HTTP Connection header if available (this means app is invoked behind a proxy)
	ConnectionHeader := c.Request.Header.Get("Connection")
	if cfCONN := c.Request.Header.Get("CF-Connection"); cfCONN != "" {
		ConnectionHeader = cfCONN
	}

	if fields[0] == "porttest" {
		if len(fields) >= 2 {
			if port, err := strconv.Atoi(fields[1]); err == nil && port > 0 && port <= 65535 {
				c.String(200, fmt.Sprintln(testRemoteTCPPort(ip.IP.String()+":"+fields[1])))
			} else {
				c.String(400, "Invalid Port Number")
			}
		} else {
			c.String(400, "Need Port")
		}
		return
	}

	// collect every reportable property on the context; each key is also
	// addressable directly as /<key>
	c.Set("ip", ip.IP.String())
	c.Set("port", ip.Port)
	c.Set("ua", c.Request.UserAgent())
	c.Set("protocol", c.Request.Proto)
	c.Set("lang", c.Request.Header.Get("Accept-Language"))
	c.Set("encoding", c.Request.Header.Get("Accept-Encoding"))
	c.Set("method", c.Request.Method)
	c.Set("connection", ConnectionHeader)
	c.Set("mime", c.Request.Header.Get("Accept"))
	c.Set("charset", c.Request.Header.Get("Accept-Charset"))
	c.Set("referer", c.Request.Header.Get("Referer"))
	c.Set("via", c.Request.Header.Get("Via"))
	c.Set("forwarded", c.Request.Header.Get("X-Forwarded-For"))
	c.Set("country", c.Request.Header.Get("CF-IPCountry"))

	// DNT arrives as "0"/"1"; render it human-readable
	r := strings.NewReplacer("0", "No", "1", "Yes")
	c.Set("dnt", r.Replace(c.Request.Header.Get("DNT")))

	c.Set("cache", c.Request.Header.Get("cache-control"))

	ua := strings.Split(c.Request.UserAgent(), "/")

	// Only lookup hostname if the results are going to need it.
	if stringInSlice(fields[0], []string{"all", "host"}) || (fields[0] == "" && ua[0] != "curl" && ua[0] != "Wget" && ua[0] != "fetch") {
		hostnames, err := net.LookupAddr(ip.IP.String())
		if err != nil {
			c.Set("host", "")
		} else {
			c.Set("host", hostnames[0])
		}
	}

	wantsJSON := false
	if len(fields) >= 2 && fields[1] == "json" {
		wantsJSON = true
	}

	switch fields[0] {
	case "":
		//If the user is using curl, wget or fetch, then we should just return the IP, else we show the home page.
		if ua[0] == "curl" || ua[0] == "Wget" || ua[0] == "fetch" {
			c.String(200, fmt.Sprintln(ip.IP))
		} else {
			c.HTML(200, "index.html", c.Keys)
		}
		return
	case "request":
		c.JSON(200, c.Request)
		return
	case "all":
		if wantsJSON {
			c.JSON(200, c.Keys)
		} else {
			c.String(200, "%v", c.Keys)
		}
		return
	}

	fieldResult, exists := c.Get(fields[0])
	if !exists {
		c.String(404, "Not Found")
		return
	}
	c.String(200, fmt.Sprintln(fieldResult))
}
// FileServer is a basic file serve handler, this is just here as an example.
// gin.Static() should be used instead
func FileServer(root string) gin.HandlerFunc {
	return func(c *gin.Context) {
		requested := c.Params.ByName("file")
		if !strings.HasPrefix(requested, "/") {
			requested = "/" + requested
		}
		// path.Clean prevents ".." traversal outside root before joining
		http.ServeFile(c.Writer, c.Request, path.Join(root, path.Clean(requested)))
	}
}
// main wires the gin router (with recovery, access logging and optional
// New Relic instrumentation) and serves it simultaneously over three
// transports: FCGI on 127.0.0.1:4000, any systemd socket-activation
// listeners, and plain HTTP on HOST:PORT (PORT defaults to 8080).
// Any serve error is funneled through errc and panics the process.
func main() {
	r := gin.New()
	r.Use(gin.Recovery())
	r.Use(Logger())
	r.LoadHTMLGlob("templates/*")

	// Enable New Relic only when a license key is configured; the application
	// name defaults to "ifconfig.io".
	if NEWRELIC_LICENSE_KEY := os.Getenv("NEWRELIC_LICENSE_KEY"); NEWRELIC_LICENSE_KEY != "" {
		var NEWRELIC_APPLICATION_NAME string
		if NEWRELIC_APPLICATION_NAME = os.Getenv("NEWRELIC_APPLICATION_NAME"); NEWRELIC_APPLICATION_NAME == "" {
			NEWRELIC_APPLICATION_NAME = "ifconfig.io"
		}
		gorelic.InitNewrelicAgent(NEWRELIC_LICENSE_KEY, NEWRELIC_APPLICATION_NAME, true)
		r.Use(gorelic.Handler)
	}

	r.GET("/:field", mainHandler)
	r.GET("/", mainHandler)

	// Create a listener for FCGI
	// NOTE(review): the FCGI address is hard-coded — confirm this is intended
	// rather than configurable.
	fcgi_listen, err := net.Listen("tcp", "127.0.0.1:4000")
	if err != nil {
		panic(err)
	}

	// Any serve goroutine reporting an error panics the whole process.
	errc := make(chan error)
	go func(errc chan error) {
		for err := range errc {
			panic(err)
		}
	}(errc)

	go func(errc chan error) {
		errc <- fcgi.Serve(fcgi_listen, r)
	}(errc)

	// Listen on whatever systemd tells us to.
	listeners, err := activation.Listeners()
	if err != nil {
		// non-fatal: continue with the remaining transports
		fmt.Printf("Could not get systemd listerns with err %q", err)
	}
	for _, listener := range listeners {
		go func(errc chan error) {
			errc <- http.Serve(listener, r)
		}(errc)
	}

	port := os.Getenv("PORT")
	host := os.Getenv("HOST")
	if port == "" {
		port = "8080"
	}
	// blocks; a returned error is sent to errc and triggers the panic above
	errc <- r.Run(host + ":" + port)
}
| [
"\"NEWRELIC_LICENSE_KEY\"",
"\"NEWRELIC_APPLICATION_NAME\"",
"\"PORT\"",
"\"HOST\""
]
| []
| [
"PORT",
"HOST",
"NEWRELIC_LICENSE_KEY",
"NEWRELIC_APPLICATION_NAME"
]
| [] | ["PORT", "HOST", "NEWRELIC_LICENSE_KEY", "NEWRELIC_APPLICATION_NAME"] | go | 4 | 0 | |
talentpool/settings.py | """
Django settings for talentpool project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Fix: read the key from the environment when available so deployments do not
# have to ship the hard-coded development key; the insecure default keeps
# local development working unchanged.
SECRET_KEY = os.environ.get(
    "DJANGO_SECRET_KEY",
    'django-insecure-l35sfvf1%sogr0j+gw4l$z2hm&17@u$&mp)*_75*@d6amy!b=r',
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# With DEBUG on, accept any host (development convenience); empty otherwise.
ALLOWED_HOSTS = ["*"] if DEBUG else []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'talentpool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'talentpool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
"NAME": os.environ.get("SQL_DATABASE", "talentpool"),
"USER": os.environ.get("SQL_USER", "talentpool"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "talentpool"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "talentpool/static")]
STATICFILES_STORAGE = "whitenoise.django.GzipManifestStaticFilesStorage"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
# MEDIA_ROOT = "/deploy/paytime/media"
# https://data-flair.training/blogs/django-file-upload/
# This is the URL the user can go to and upload their files from the browser
MEDIA_URL = "/media/"
# Tells Django to store all the uploaded files in a folder called ’media’
# created in the BASE_DIR, i.e., the project Directory
# therefore we need to create a folder called media in the root
# of this project, on the same level as manage.py
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
# 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',)
# 'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication',),
# 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAdminUser', ),
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
),
}
AUTH_USER_MODEL = "user.User"
| []
| []
| [
"SQL_PASSWORD",
"SQL_DATABASE",
"SQL_USER",
"SQL_PORT",
"SQL_HOST"
]
| [] | ["SQL_PASSWORD", "SQL_DATABASE", "SQL_USER", "SQL_PORT", "SQL_HOST"] | python | 5 | 0 | |
pkg/api/handlers/utils/pods.go | package utils
import (
"net/http"
"github.com/containers/libpod/libpod"
lpfilters "github.com/containers/libpod/libpod/filters"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/gorilla/schema"
)
// GetPods resolves the request's "filters" query parameters into libpod
// pod filters, looks up the matching pods from the runtime, and flattens
// each pod (plus its containers) into an entities.ListPodsReport suitable
// for an API response.
func GetPods(w http.ResponseWriter, r *http.Request) ([]*entities.ListPodsReport, error) {
	var (
		lps []*entities.ListPodsReport
		pods []*libpod.Pod
		filters []libpod.PodFilter
	)
	// Runtime and form decoder are injected into the request context by
	// the API server middleware.
	runtime := r.Context().Value("runtime").(*libpod.Runtime)
	decoder := r.Context().Value("decoder").(*schema.Decoder)
	query := struct {
		All bool // NOTE(review): decoded but never referenced below
		Filters map[string][]string `schema:"filters"`
		Digests bool
	}{}
	if err := decoder.Decode(&query, r.URL.Query()); err != nil {
		return nil, err
	}
	// "digests" is accepted but not implemented; flag it explicitly.
	if _, found := r.URL.Query()["digests"]; found && query.Digests {
		UnSupportedParameter("digests")
	}
	// Translate each filter key/value pair into a libpod filter function.
	for k, v := range query.Filters {
		for _, filter := range v {
			f, err := lpfilters.GeneratePodFilterFunc(k, filter)
			if err != nil {
				return nil, err
			}
			filters = append(filters, f)
		}
	}
	pods, err := runtime.Pods(filters...)
	if err != nil {
		return nil, err
	}
	// Build one report per pod, including per-container status lines.
	for _, pod := range pods {
		status, err := pod.GetPodStatus()
		if err != nil {
			return nil, err
		}
		ctrs, err := pod.AllContainers()
		if err != nil {
			return nil, err
		}
		infraID, err := pod.InfraContainerID()
		if err != nil {
			return nil, err
		}
		lp := entities.ListPodsReport{
			Cgroup: pod.CgroupParent(),
			Created: pod.CreatedTime(),
			Id: pod.ID(),
			Name: pod.Name(),
			Namespace: pod.Namespace(),
			Status: status,
			InfraId: infraID,
			Labels: pod.Labels(),
		}
		for _, ctr := range ctrs {
			state, err := ctr.State()
			if err != nil {
				return nil, err
			}
			lp.Containers = append(lp.Containers, &entities.ListPodContainer{
				Id: ctr.ID(),
				Names: ctr.Name(),
				Status: state.String(),
			})
		}
		lps = append(lps, &lp)
	}
	return lps, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
get-the-news.py | #!/usr/bin/env python3
import os
import spotipy
import spotipy.util as util
import telegram
# Target Spotify playlist that receives the newest releases.
playlist_id = "3Kz9W3n6zGkHVBtoGU9mwI"
# Spotify account that owns the playlist.
username = "floresrobles"
# Hard cap on the playlist length.
max_tracks_on_playlist = 400
# Market used when querying artist albums.
country = "FR"
# OAuth scope needed to rewrite the playlist.
scope = 'playlist-modify-public'
# Spotify API batch limits (albums endpoint / playlist track endpoints).
max_albums_per_call = 20
max_tracks_per_call = 100
def fetch_followed_artists(sp):
    """Return every artist the current user follows, walking all pages."""
    page = sp.current_user_followed_artists()
    followed = []
    while page:
        chunk = page['artists']
        followed.extend(chunk['items'])
        page = sp.next(chunk)
    return followed
def fetch_albums(sp, artists, type):
    """Collect every release of the given type(s) for each artist.

    Pagination is followed for each artist; results are restricted to the
    module-level ``country`` market.
    """
    collected = []
    for artist in artists:
        page = sp.artist_albums(artist['id'], album_type=type, country=country)
        while page:
            collected.extend(page["items"])
            page = sp.next(page)
    return collected
def fetch_full_albums(sp, albums):
    """Fetch the full album objects for the given (partial) album records.

    Requests are batched in groups of ``max_albums_per_call`` because the
    Spotify ``albums`` endpoint caps the number of ids per call.
    """
    # Start from an empty list: the previous version pre-filled the result
    # with len(albums) empty dicts before extending, polluting the output.
    full_albums = []
    for start in range(0, len(albums), max_albums_per_call):
        response = sp.albums([album["id"] for album in albums[start: start + max_albums_per_call]])
        full_albums.extend(response['albums'])
    return full_albums
def order_albums(albums):
    """Return only albums carrying a release date, newest first."""
    dated = [record for record in albums if "release_date" in record]
    dated.sort(key=lambda record: record["release_date"], reverse=True)
    return dated
def filter_tracks(sp, full_albums):
    """Flatten the track listings of the given albums into a single list.

    ``sp`` is accepted for signature compatibility but not used.
    """
    return [track
            for album in full_albums
            for track in album["tracks"]["items"]]
def get_playlist(sp, playlist_id):
    """Return every track entry of the user's playlist, page by page."""
    result = sp.user_playlist(username, playlist_id, fields="tracks,next")
    page = result['tracks']
    entries = page["items"]
    while page['next']:
        page = sp.next(page)
        entries.extend(page["items"])
    return entries
def set_tracks(sp, tracks, playlist_id):
    """Replace the playlist's contents with ``tracks``.

    The first ``max_tracks_per_call`` tracks replace the playlist wholesale;
    the remainder is appended in API-sized chunks up to
    ``max_tracks_on_playlist`` total.
    """
    uris = [track["uri"] for track in tracks]
    sp.user_playlist_replace_tracks(username, playlist_id,
                                    uris[0:max_tracks_per_call])
    for start in range(max_tracks_per_call, max_tracks_on_playlist,
                       max_tracks_per_call):
        chunk = uris[start:start + max_tracks_per_call]
        # Stop once the supplied tracks are exhausted: the previous version
        # kept issuing add-track API calls with empty payloads.
        if not chunk:
            break
        sp.user_playlist_add_tracks(username, playlist_id, chunk)
def get_news():
    """
    Entry point: refresh the playlist with the newest releases of the
    followed artists and announce any newly added tracks over Telegram.
    Requires TELEGRAM_TOKEN and TELEGRAM_CHAT_ID in the environment.
    """
    token = util.prompt_for_user_token(username, scope = scope)
    if token:
        sp = spotipy.Spotify(auth=token)
        # Gather every album/single of every followed artist, order them
        # newest first, and cap the flattened track list at the playlist max.
        artists = fetch_followed_artists(sp)
        albums = fetch_albums(sp, artists, 'album,single')
        full_albums = fetch_full_albums(sp, albums)
        ordered_albums = order_albums(full_albums)
        tracks = filter_tracks(sp, ordered_albums)[:max_tracks_on_playlist]
        # Diff the fresh track ids against the playlist's current content.
        old_tracks = get_playlist(sp, playlist_id)
        old_tracks_ids = [track["track"]["id"] for track in old_tracks]
        tracks_ids = [track["id"] for track in tracks]
        new_tracks_ids = [val for val in tracks_ids if val not in old_tracks_ids]
        new_tracks = [track["artists"][0]["name"] + ", " + track["name"] for track in tracks if track["id"] in new_tracks_ids]
        if len(new_tracks_ids) > 0:
            # Only rewrite the playlist and notify when something changed.
            set_tracks(sp, tracks, playlist_id)
            bot = telegram.Bot(token=os.getenv('TELEGRAM_TOKEN'))
            # Telegram message bodies are capped at 4096 characters.
            bot.send_message(chat_id=os.getenv('TELEGRAM_CHAT_ID'), text="\n".join(new_tracks)[0:4095])
    else:
        print("Update token in env")
get_news()
| []
| []
| [
"TELEGRAM_CHAT_ID",
"TELEGRAM_TOKEN"
]
| [] | ["TELEGRAM_CHAT_ID", "TELEGRAM_TOKEN"] | python | 2 | 0 | |
k8s-webshell/controllers/terminalws.go | package controllers
import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/astaxie/beego"
	"github.com/docker/docker/pkg/term"
	"github.com/gomodule/redigo/redis"
	"gopkg.in/igm/sockjs-go.v2/sockjs"
	"k8s.io/api/core/v1"
	// The next three packages are referenced only by the commented-out REST
	// client construction in Handler; blank-imported so the file compiles
	// while that code stays disabled.
	_ "k8s.io/apimachinery/pkg/runtime"
	_ "k8s.io/apimachinery/pkg/runtime/schema"
	_ "k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
	"k8s.io/kubernetes/pkg/util/interrupt"
)
// Read implements io.Reader: it receives one sockjs message from the
// browser. A JSON object with "cols"/"rows" is treated as a terminal
// resize request and pushed onto the size channel (0 bytes read); any
// other message is copied into p as raw terminal input.
func (t TerminalSockjs) Read(p []byte) (int, error) {
	raw, err := t.conn.Recv()
	if err != nil {
		return 0, err
	}
	var resize map[string]uint16
	if json.Unmarshal([]byte(raw), &resize) != nil {
		// Not a resize message: forward it as keyboard input.
		return copy(p, raw), nil
	}
	t.sizeChan <- &remotecommand.TerminalSize{
		Width:  resize["cols"],
		Height: resize["rows"],
	}
	return 0, nil
}
// Write implements io.Writer by forwarding terminal output to the
// browser as a sockjs string message.
func (t TerminalSockjs) Write(p []byte) (int, error) {
	sendErr := t.conn.Send(string(p))
	return len(p), sendErr
}
// TerminalSockjs bridges a browser sockjs session and a Kubernetes exec
// stream. It implements io.Reader/io.Writer for the remote process's
// stdio and remotecommand.TerminalSizeQueue for terminal resizes.
type TerminalSockjs struct {
	conn sockjs.Session // browser-facing sockjs session
	sizeChan chan *remotecommand.TerminalSize // resize events flowing from Read to Next
	context string // cluster context/region passed to getKubeConfig
	namespace string
	pod string
	container string
}
// Next implements remotecommand.TerminalSizeQueue: it blocks until the
// browser reports a terminal resize and returns the new dimensions.
func (t *TerminalSockjs) Next() *remotecommand.TerminalSize {
	next := <-t.sizeChan
	beego.Debug(fmt.Sprintf("terminal size to width: %d height: %d", next.Width, next.Height))
	return next
}
// buildConfigFromContextFlags builds a *rest.Config for the given kubectl
// context out of the kubeconfig file at kubeconfigPath.
func buildConfigFromContextFlags(context, kubeconfigPath string) (*rest.Config, error) {
	rules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}
	overrides := &clientcmd.ConfigOverrides{CurrentContext: context}
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
}
// getKubeConfig fetches the kubeconfig for the given region from the
// "cluster_kubeconfig" redis hash and builds both a *rest.Config and a
// typed clientset from it. The redis address is read from the redis_url
// environment variable.
func getKubeConfig(region string) (*rest.Config, *kubernetes.Clientset, error) {
	redisURL := os.Getenv("redis_url")
	if len(redisURL) == 0 {
		return nil, nil, errors.New("cannot found redis_url")
	}
	log.Println(redisURL)
	conn, err := redis.DialURL(redisURL)
	if err != nil {
		// Fail fast: the previous version only logged and slept here,
		// then dereferenced the nil connection below.
		return nil, nil, err
	}
	defer conn.Close()
	log.Println(region)
	data, err := conn.Do("hget", "cluster_kubeconfig", region)
	if err != nil {
		return nil, nil, err
	}
	raw, ok := data.([]byte)
	if !ok {
		return nil, nil, errors.New("no kubeconfig stored for region " + region)
	}
	config, err := clientcmd.RESTConfigFromKubeConfig(raw)
	if err != nil {
		// Previously ignored: a bad kubeconfig would surface as a nil
		// config inside client construction instead.
		return nil, nil, err
	}
	clientset, err := kubernetes.NewForConfig(config)
	return config, clientset, err
}
// Handler attaches the sockjs-backed terminal t to an exec session in
// the target pod/container, running cmd with a TTY. The remote process's
// stdin/stdout/stderr are wired to t, and resize events are consumed
// from t's size queue.
func Handler(t *TerminalSockjs, cmd string) error {
	config, clientset, err := getKubeConfig(t.context)
	if err != nil {
		return err
	}
	fn := func() error {
		req := clientset.CoreV1().RESTClient().Post().
			Resource("pods").
			Name(t.pod).
			Namespace(t.namespace).
			SubResource("exec").
			Param("container", t.container).
			Param("stdin", "true").
			Param("stdout", "true").
			Param("stderr", "true").
			Param("command", cmd).Param("tty", "true")
		req.VersionedParams(
			&v1.PodExecOptions{
				Container: t.container,
				Command:   []string{},
				Stdin:     true,
				Stdout:    true,
				Stderr:    true,
				TTY:       true,
			},
			scheme.ParameterCodec,
		)
		executor, err := remotecommand.NewSPDYExecutor(
			config, http.MethodPost, req.URL(),
		)
		if err != nil {
			return err
		}
		return executor.Stream(remotecommand.StreamOptions{
			Stdin:             t,
			Stdout:            t,
			Stderr:            t,
			Tty:               true,
			TerminalSizeQueue: t,
		})
	}
	// Save the local terminal state so it can be restored if the stream
	// is interrupted.
	inFd, _ := term.GetFdInfo(t.conn)
	state, err := term.SaveState(inFd)
	// Previously this error was declared and never used (a compile
	// error); check it explicitly.
	if err != nil {
		return err
	}
	return interrupt.Chain(nil, func() {
		term.RestoreTerminal(inFd, state)
	}).Run(fn)
}
// ServeHTTP implements http.Handler: it reads the target cluster
// context, namespace, pod and container from the request parameters and
// upgrades the connection to a sockjs terminal session, preferring
// /bin/bash and falling back to /bin/sh.
func (self TerminalSockjs) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	context := r.FormValue("context")
	namespace := r.FormValue("namespace")
	pod := r.FormValue("pod")
	container := r.FormValue("container")
	handle := func(session sockjs.Session) {
		t := &TerminalSockjs{session, make(chan *remotecommand.TerminalSize),
			context, namespace, pod, container}
		if err := Handler(t, "/bin/bash"); err != nil {
			beego.Error(err)
			beego.Error(Handler(t, "/bin/sh"))
		}
	}
	sockjs.NewHandler("/terminal/ws", sockjs.DefaultOptions, handle).ServeHTTP(w, r)
}
| [
"\"redis_url\""
]
| []
| [
"redis_url"
]
| [] | ["redis_url"] | go | 1 | 0 | |
redwood/cli/admin/delete.py | """
delete.py
This script allows developers to delete files generated from redwood storage
system.
"""
import argparse
import os
import json
import boto3
import botocore
import defaults
import docker
import logging
import urllib2
import ssl
import datetime
from io import BytesIO
# Module-level logger: INFO-level messages are emitted through an
# explicitly attached stream handler (stderr).
logger = logging.getLogger('admin-delete')
logger.setLevel(level=logging.INFO)
strmhd = logging.StreamHandler()
strmhd.setLevel(level=logging.INFO)
logger.addHandler(strmhd)
class ICDCDException(Exception):
    """
    Base exception class for DCCOPS admin scripts
    """
    # Human-readable description; concrete subclasses assign this.
    message = None

    def __str__(self):
        """
        Formats the error as "<ClassName>: <message>".

        Returns
        -------
        str
            A formatted error message
        """
        return "{}: {}".format(self.__class__.__name__, self.message)

    def __repr__(self):
        """
        Identical output to self.__str__()

        Returns
        -------
        str
            output of self.__str__()
        """
        return self.__str__()
class MetadataDeleteError(ICDCDException):
    """
    Raised when a file metadata entry could not be deleted.
    """
    def __init__(self, file_uuid=""):
        """
        Builds the error message from the metadata entry's file uuid.

        Parameters
        ----------
        file_uuid: str
            file_uuid of the file metadata
        """
        self.file_name = file_uuid
        self.message = ("Unable to remove file "
                        " {} from Metadata Server".format(self.file_name))
class ICDCDBadAWSKeys(ICDCDException):
    """
    Raised when the AWS credentials are rejected while accessing S3.
    """
    def __init__(self):
        """
        Sets the fixed error message.
        """
        self.message = ("AWS didn't receive the right access"
                        " and secret access keys.")
class RedwoodDeleteError(ICDCDException):
    """
    Raised when a file was not deleted properly.
    """
    def __init__(self, file_name=""):
        """
        Builds the error message from the undeletable file's uuid.

        Parameters
        ----------
        file_name: str
            File uuid of the file that can't be deleted
        """
        self.file_name = file_name
        self.message = ("Unable to delete File {}."
                        " File still exists in bucket".format(self.file_name))
class ForbiddenDeleteError(ICDCDException):
    """
    Raised when a protected file (one that must never be removed) was
    about to be deleted.
    """
    def __init__(self, message=""):
        """
        Stores the caller-supplied description.

        Parameters
        ----------
        message: str
            Error Message
        """
        self.message = message
class RedwoodFileNotFoundError(ICDCDException):
    """
    Raised when a file could not be located in the bucket.
    """
    def __init__(self, file_uuid=""):
        """
        Builds the error message from the missing file's uuid.

        Parameters
        ----------
        file_uuid: str
            File UUID that can't be found
        """
        self.file_uuid = file_uuid
        self.message = ("Cannot find the file named {}."
                        " The file uuid may be incorrect or the file is not"
                        " in the bucket.".format(self.file_uuid))
class RedwoodMissingDataError(ICDCDException):
    """
    Raised when expected metadata is absent from the file metadata
    database.
    """
    def __init__(self, message=""):
        """
        Stores the caller-supplied description.

        Parameters
        ----------
        message: str
            Error Message
        """
        self.message = message
class RedwoodFileMetadataAPI:
    """
    Retrieves and modifies data from the redwood metadata server by
    accessing the https website or the MongoDB container directly
    Attributes
    -----------
    endpoint : str
        The base url of the https metadata website
    mongodb_container_name : str
        The name of the docker container where the metadata
        database is located
    table_url : str
        The exposed url of the MongoDB dcc-metadata database
    """
    # JSON keys used by the metadata server's entity records.
    FILE_NAME_KEY = 'fileName'
    FILE_NAME_BUNDLE_ID = 'gnosId'
    def __init__(self, endpoint, mongodb_container_name=None,
                 table_url=None):
        """
        Initializes attributes
        Parameters
        ----------
        endpoint : str
            The base url of the https metadata website
        mongodb_container_name : str
            The name of the docker container where the metadata
            database is located
        table_url : str
            The exposed url of the mongoDB dcc-metadata database
        """
        self.mongodb_container_name = mongodb_container_name or \
            defaults.MONGODB_CONTAINER
        self.table_url = table_url or defaults.MONGODB_URL
        self.endpoint = endpoint
    def get_file_uuids_from_bundle(self, bundle_id, context=None):
        """
        Returns the entity ids of every file belonging to the given
        bundle, as listed by the https metadata website.
        Parameters
        ----------
        bundle_id : str
            The bundle uuid to enumerate
        context : ssl.SSLContext, optional
            Custom TLS context; defaults to one with verification disabled
        Returns
        -------
        list of str
            The file uuids of the bundle's entities
        """
        context = context or self._generate_fake_context()
        url = 'https://metadata.{}/entities?&gnosId={}'.format(
            self.endpoint, bundle_id)
        file_dict = json.load(urllib2.urlopen(url, context=context))
        return [file_data['id'] for file_data in file_dict['content']]
    def _run_mongo_shell_script(self, js_command):
        """
        Access the redwood-metadata-db docker container. Then, runs a MongoDB
        shell command by using the given javascript command
        Parameters
        ----------
        js_command
            The javascript command that the MongoDB shell will execute
        Returns
        -------
        str
            The output from MongoDB shell script
        """
        client = docker.APIClient()
        # NOTE(review): this uses defaults.MONGODB_CONTAINER directly rather
        # than self.mongodb_container_name set in __init__ -- confirm whether
        # the constructor parameter is meant to take effect here.
        exec_info = client.exec_create(defaults.MONGODB_CONTAINER,
                                       ['mongo', self.table_url, '--quiet',
                                        '--eval', js_command])
        res = client.exec_start(exec_info['Id'])
        return res.strip()
    def delete_entity(self, file_uuid):
        """
        Deletes the file metadata from the file metadata server by executing
        a MongoDB shell delete command in the metadata server's docker
        container.
        Parameters
        ----------
        file_uuid : str
            The file_uuid of the target deleted file to locate the database
            entry
        Raises
        -------
        MetadataDeleteError
            Either the file metadata database is unable to delete the file's
            entry or the database doesn't contain any entries with the given
            file_uuid
        """
        # The '{file_name}' placeholder is substituted textually; str.format
        # is avoided because the JS itself contains curly braces.
        delete_js = "var result = db.Entity.deleteMany(" \
                    "{ _id: '{file_name}'});" \
                    "printjson(result);".replace('{file_name}', file_uuid)
        res = self._run_mongo_shell_script(delete_js)
        if json.loads(res)['deletedCount'] < 1:
            raise MetadataDeleteError(file_uuid)
    def get_file_metadata(self, file_uuid, context=None):
        """
        Gets the file metadata from the https metadata website
        Parameters
        ----------
        file_uuid : str
            The target file's uuid for locating its file metadata
        context : ssl.SSLContext, optional
            The custom context for accessing the metadata website. Will default
            to a context with a fake cert.
        Returns
        -------
        dict
            the file metadata of the target file
        Raises
        ------
        RedwoodMissingDataError
            Can't find the file metadata with the given file_uuid
        """
        context = context or self._generate_fake_context()
        url = 'https://metadata.{}/entities/{}'.format(
            self.endpoint, file_uuid)
        try:
            return json.load(urllib2.urlopen(url, context=context))
        except urllib2.HTTPError as e:
            # Only a 404 is translated; other HTTP errors propagate as-is.
            if e.code == 404:
                error_msg = "Unable to find metadata entry " \
                            "at {} for File {}.".format(url, file_uuid)
                raise RedwoodMissingDataError(error_msg)
            else:
                raise
    def get_bundle_metadata_info(self, bundle_id, context=None):
        """
        Gets the file metadata of the
        bundle's metadata.json (bundle metadata file) from the https metadata
        website
        Parameters
        ----------
        bundle_id : str
            The metadata.json's bundle uuid
        context : ssl.SSLContext, optional
            The context for accessing the metadata website
        Returns
        --------
        dict
            The file metadata of the target bundle's
            metadata.json (bundle metadata file)
        """
        context = context or self._generate_fake_context()
        url = 'https://metadata.{}/entities?fileName={}&gnosId={}'.format(
            self.endpoint, defaults.BUNDLE_METADATA_FILENAME, bundle_id)
        return json.load(urllib2.urlopen(url, context=context))
    @staticmethod
    def _generate_fake_context():
        """
        Generates a fake ssl.SSLContext for retrieving json data by https
        Returns
        -------
        An ssl.SSLContext containing a fake cert
        """
        # Certificate verification is disabled entirely here, so the
        # metadata server's identity is NOT authenticated.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        return ctx
class DCCOpsRepo:
    """
    An enumeration representing the repositories used by DCC-Ops
    """
    # The values double as keys into DCCOpsEnv's per-repo settings dict.
    BOARDWALK = 'boardwalk'
    ACTION_SERVICE = 'action_service'
    REDWOOD = 'redwood'
class DCCOpsEnv:
    """
    Contains all the settings from DCC-Ops.
    Attributes
    -----------
    dccops_dir: str
        The root directory of the DCC-Ops repository
    _env_vars: dict
        Maps each DCC-Ops repo to a dict of the environment variables
        declared in that repo's env file
    """
    def __init__(self, dcc_ops_directory):
        """
        Collects the environment variables from Boardwalk,
        Action Service, and Redwood from the DCC-Ops repository.
        Also, initializes the dcc ops directory attribute.
        Parameters
        ----------
        dcc_ops_directory: str
            The directory of the DCC-Ops directory
        """
        self.dccops_dir = dcc_ops_directory
        self._env_vars = {}
        self._sync_settings(DCCOpsRepo.BOARDWALK,
                            defaults.DCCOPS_BOARDWALK_SUB_DIR)
        self._sync_settings(DCCOpsRepo.ACTION_SERVICE,
                            defaults.DCCOPS_ACTION_SERVICE_SUB_DIR)
        self._sync_settings(DCCOpsRepo.REDWOOD,
                            defaults.DCCOPS_REDWOOD_SUB_DIR)
    def _sync_settings(self, repo, repo_subdir,
                       env_vars_filename=defaults.DCCOPS_ENV_FILENAME):
        """
        Gathers the environment variables from the environment variable file
        of the given module sub-directory in DCC-Ops: each NAME=value line
        is split into a name and a value and stored per repo.
        Parameters
        ----------
        repo_subdir: str
            the repo's sub-directory containing the environment variable file
        env_vars_filename: str, optional
            the filename of the environment variable file
        repo: DCCOpsRepo, str
            The repo where the environment variable is located
        """
        with open(os.path.join(self.dccops_dir, repo_subdir,
                               env_vars_filename), 'r') as env_file:
            var_dict = {}
            for setting in env_file.readlines():
                if '=' in setting:
                    # Split on the FIRST '=' only: values such as base64
                    # secret keys may themselves contain '=' characters, and
                    # an unbounded split would raise ValueError on unpacking.
                    var_name, var_setting = setting.split('=', 1)
                    var_dict[var_name] = var_setting.strip()
            self._env_vars[repo] = var_dict
    def get_env_var(self, repo, var_name):
        """
        Gets the value of the environment variable from the given repo and var
        name
        Parameters
        ----------
        repo: DCCOpsRepo, str
            The repo where the environment variable is located
        var_name: str
            The name of the environment variable
        Returns
        -------
        str
            The value of the environment variable from the given repo and var
            name
        """
        return self._env_vars[repo][var_name]
class RedwoodAdminDeleter:
"""
Deletes files from the AWS S3 buckets used by the Redwood Storage System.
Also, handles any information related to the file deletion.
Attributes
----------
bucket_name : str
The name of the AWS S3 bucket containing the files selected for
deletion.
base_endpoint : str
the base url for the redwood metadata server
data_root_folder : str
The root folder of where all the bundle's files and metadata are saved.
deleted_list_filename : str
The location of the deleted_list file.
redwood_metadata_api : RedwoodFileMetadataAPI
For accessing and editing the file metadata in the redwood metadata
server.
ignore_errors : boolean
If True, prevents errors (except ForbiddenDeleteError and
RedwoodFileNotFoundError for the target deleted file) from
interrupting the deletion process
"""
    def __init__(self, dcc_ops_env=None, ignore_errors=False):
        """
        Reads the bucket/endpoint settings from the DCC-Ops environment,
        exports the AWS credentials, validates them, and initializes the
        Redwood file metadata API plus the remaining attributes.
        Parameters
        ----------
        dcc_ops_env : DCCOpsEnv
            Source of the redwood .env settings (AWS keys, bucket, endpoint)
        ignore_errors : bool
            If True, most per-step failures are logged instead of raised
        Raises
        ------
        ICDCDBadAWSKeys
            If the AWS credentials are rejected during validation
        """
        self.env_settings = dcc_ops_env
        # boto3 reads credentials from the process environment, so the keys
        # from the redwood .env are exported here before any client is built.
        os.environ['AWS_ACCESS_KEY_ID'] = self.env_settings.get_env_var(
            DCCOpsRepo.REDWOOD,
            defaults.DCCOPS_ENV_NAME_ACCESS_ID)
        os.environ['AWS_SECRET_ACCESS_KEY'] = self.env_settings.get_env_var(
            DCCOpsRepo.REDWOOD,
            defaults.DCCOPS_ENV_NAME_SECRET_KEY)
        self.bucket_name = self.env_settings.get_env_var(
            DCCOpsRepo.REDWOOD,
            defaults.DCCOPS_ENV_NAME_REDWOOD_BUCKET)
        self.base_endpoint = self.env_settings.get_env_var(
            DCCOpsRepo.REDWOOD,
            defaults.DCCOPS_ENV_NAME_REDWOOD_ENDPOINT)
        self.data_root_folder = defaults.METADATA_FILE_ROOT_FOLDER
        self.deleted_list_filename = defaults.DELETED_LIST_FILENAME
        self.validate_aws_credentials()
        self.redwood_metadata_api = RedwoodFileMetadataAPI(self.base_endpoint)
        self.ignore_errors = ignore_errors
    @staticmethod
    def validate_aws_credentials():
        """
        Checks if the AWS access key and AWS secret access key is valid.
        Uses the list_buckets call to check the aws keys' validity. If they
        aren't valid, the InvalidAccessKeyId ClientError is caught and
        ICDCDBadAWSKeys is raised instead.
        Raises
        -------
        ICDCDBadAWSKeys
            If aws access keys are invalid
        """
        s3_client = boto3.client('s3')
        try:
            # Cheap authenticated call; fails fast on bad credentials.
            s3_client.list_buckets()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidAccessKeyId':
                raise ICDCDBadAWSKeys
            else:
                raise
def check_file_exists(self, file_name):
"""
Checks if there's a file with the given filename in that bucket.
Parameters
----------
file_name
the file's name that going to be checked
Returns
-------
returns True if a file with the given filename exists
in the bucket otherwise this method returns False
"""
s3_client = boto3.client('s3')
try:
s3_client.head_object(Bucket=self.bucket_name, Key=file_name)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == '404':
return False
else:
raise
else:
return True
    def delete_file(self, file_uuid, skip_missing_files=False):
        """
        Removes the deleted file's entry in the metadata database,
        adds a deletion flag in the file's entry in the bundle metadata,
        removes the file's storage listing in the redwood storage bucket,
        and finally deletes the file in the redwood storage system.
        If the deletion was successful, any information about the deletion is
        recorded in the deletion_file_list file in the root folder of the
        redwood storage bucket.
        Parameters
        ----------
        file_uuid: str
            The uuid of the file to delete
        skip_missing_files: bool
            When True, a missing data object is not forced to raise;
            it is only logged if ignore_errors is enabled as well
        Raises
        ------
        RedwoodMissingDataError
            (If ignore_errors is disabled)
            The deleted file has no file metadata in the
            redwood metadata database
        ForbiddenDeleteError
            The deleted file contains the bundle metadata
        RedwoodFileNotFoundError
            (If ignore_errors is disabled)
            The bundle data doesn't exist in the redwood storage bucket
        """
        logger.info("Starting Deletion for {}...".format(file_uuid))
        file_metadata = {}
        try:
            file_metadata = self.redwood_metadata_api.get_file_metadata(file_uuid)
        except RedwoodMissingDataError as e:
            # Best-effort mode: continue without metadata-related steps.
            if self.ignore_errors:
                logging.warn(str(e))
                logging.warn("Metadata doesn't exist for this file."
                             " Skipping metadata related steps.")
            else:
                raise
        if file_metadata:
            metadata_filename = defaults.BUNDLE_METADATA_FILENAME
            bundle_id = file_metadata['gnosId']
            bundle_metadata = self.redwood_metadata_api. \
                get_bundle_metadata_info(bundle_id)
            # The bundle's metadata.json itself must never be deleted.
            if file_metadata['fileName'] == metadata_filename:
                raise ForbiddenDeleteError("{} is a bundle metadata file ({})"
                                           " and cannot be"
                                           " deleted".format(bundle_id,
                                                             metadata_filename)
                                           )
            bundle_metadata_json_uuid = bundle_metadata['content'][0]['id']
            logger.info("Found file metadata for {} ({}) from Bundle {}\n"
                        "Editing Bundle's metadata.json and .meta files"
                        " ({})...".format(file_metadata['fileName'],
                                          file_uuid,
                                          file_metadata['gnosId'],
                                          bundle_metadata_json_uuid))
            try:
                # Mark the file as deleted inside the bundle's metadata.json.
                self._edit_bundle_metadata(
                    file_metadata['fileName'],
                    bundle_metadata_json_uuid)
            except RedwoodFileNotFoundError:
                if self.ignore_errors:
                    logging.warn("This bundle ({}) no longer has its metadata"
                                 " in the bucket. Please delete the other"
                                 " files from this"
                                 " bundle".format(file_metadata['gnosId']))
                else:
                    raise
                pass
            logger.info("Deleting entry in redwood-metadata-db...")
            self._clear_metadata_db_entry(file_uuid)
        logger.info("Deleting {} ({}) and"
                    " its endpoint"
                    " listing file...".format(file_metadata.get('fileName',
                                                                '[No Metadata'
                                                                ' Found]'),
                                              file_uuid))
        # Remove both the data object and its ".meta" listing companion.
        target_file_name = "{}/{}".format(self.data_root_folder, file_uuid)
        listing_info_file_name = "{}/{}.meta".format(self.data_root_folder,
                                                     file_uuid)
        self._safely_delete_file(target_file_name, always_throw_error=not skip_missing_files)
        self._safely_delete_file(listing_info_file_name)
        logger.info("Adding file entry in deleted file list for File"
                    " ({})...".format(file_uuid))
        self._record_deletion_data(
            file_uuid,
            file_metadata.get('fileName', '[No Metadata Found]'),
            file_metadata.get('gnosId', '[No Metadata Found]'))
    def delete_files_in_bundle(self, bundle_uuid):
        """
        Deletes every file belonging to the given bundle.
        Files are removed with skip_missing_files=True so partially-removed
        bundles can still be cleaned up. The bundle's metadata.json cannot
        be deleted (delete_file raises ForbiddenDeleteError for it), so
        that file is logged and skipped.
        Parameters
        ----------
        bundle_uuid: str
            The uuid of the bundle whose files will be deleted
        """
        fls = self.redwood_metadata_api.get_file_uuids_from_bundle(bundle_uuid)
        for file_uuid in fls:
            try:
                self.delete_file(file_uuid, skip_missing_files=True)
            except ForbiddenDeleteError:
                logging.error("Skipping Metadata.json file....")
    def _safely_delete_file(self, file_name, always_throw_error=False):
        """
        Deletes the file if the file exists in the bucket.
        Parameters
        ----------
        file_name: str
            The deleted file's file name
        always_throw_error: bool
            If True, a missing file raises RedwoodFileNotFoundError even
            when ignore_errors is enabled
        Raises
        ------
        RedwoodFileNotFoundError
            File is not in the redwood storage S3 bucket.
        """
        s3_client = boto3.client('s3')
        if self.check_file_exists(file_name):
            s3_client.delete_object(Bucket=self.bucket_name,
                                    Key=file_name)
        elif self.ignore_errors and not always_throw_error:
            # Best-effort mode: log and carry on when the object is gone.
            logger.warn("Unable to delete {}".format(file_name))
        else:
            raise RedwoodFileNotFoundError(file_name)
def _record_deletion_data(self, file_uuid, file_name, bundle_uuid):
"""
Logs info about the file deletion in a file. The name of the file is
the value of defaults.DELETED_LIST_FILENAME.
The following info is recorded:
-Deleted file's uuid
-Deleted file's name
-Date and time of Deletion
Parameters
----------
file_uuid: str
The file_name of the deleted file
"""
s3_client = boto3.client('s3')
deleted_file_data = BytesIO()
deletion_dict = {'deletedFiles': {'bundles': {}}}
if self.check_file_exists(self.deleted_list_filename):
s3_client.download_fileobj(self.bucket_name,
self.deleted_list_filename,
deleted_file_data)
try:
deletion_dict = json.loads(deleted_file_data.getvalue())
except ValueError:
logger.warn("Deletion History Log "
"format's is incorrect.")
bundle_list = deletion_dict['deletedFiles']['bundles']
date = datetime.datetime.now().strftime('%m-%d-%y %I:%m:%S %p')
bundle_list.setdefault(bundle_uuid, []) \
.append({'file_uuid': file_uuid,
'file_name': file_name,
'date_deleted': date})
deletion_list_bytes = json.dumps(deletion_dict).encode()
if self.check_file_exists(self.deleted_list_filename):
s3_client.put_object(Bucket=self.bucket_name,
Key=self.deleted_list_filename,
Body=deletion_list_bytes)
else:
del_byte_io = BytesIO(deletion_list_bytes)
s3_client.upload_fileobj(del_byte_io,
self.bucket_name,
self.deleted_list_filename)
def _clear_metadata_db_entry(self, file_uuid):
"""
Removes the deleted files entry in a mongo database in the
redwood-metadata-db container
Parameters
----------
file_uuid
The deleted file's file uuid
Raises
-------
MetadataDeleteError
Unable able to delete the deleted file's entry
(if ignore_errors is disabled)
"""
try:
self.redwood_metadata_api.delete_entity(file_uuid)
except MetadataDeleteError as e:
if self.ignore_errors:
logger.warn(str(e))
logger.warn('Unable to delete metadata'
' server entry for file {}'.format(file_uuid))
else:
raise
    def _edit_bundle_metadata(self, file_name,
                              metadata_file_uuid):
        """
        This method gets the bundle's metadata.json file in the redwood storage
        S3 bucket. Then, in the json file, it finds the deleted file's entry
        under "workflow_outputs" key. Afterwards, it adds the is_deleted
        flag in the entry. It should look like the following example...

        Example
        -------
        {
            ...
            "workflow_outputs": {
                "is_deleted": false
                "file_type": "fake",
                "file_sha": "fac54a",
                "file_path": "fake_file.fakse",
                "file_size": 8888
            }
        }

        Finally, the new metadata.json is uploaded to the S3 bucket and the old
        metadata is overwritten, and the companion ".meta" listing file is
        rewritten so its recorded sizes match the edited payload.

        Parameters
        ----------
        file_name: str
            the name of the deleted file
        metadata_file_uuid
            the file_uuid metadata.json of the deleted file's bundle

        Raises
        -------
        RedwoodFileNotFoundError
            The metadata.json is not in the S3 Bucket redwood storage
        """
        # metadata.json lives at <data_root_folder>/<metadata_file_uuid>;
        # the listing info sits next to it with a ".meta" suffix.
        file_location = "{}/{}".format(self.data_root_folder,
                                       metadata_file_uuid)
        listing_file_location = "{}.meta".format(file_location)
        s3_client = boto3.client('s3')
        if self.check_file_exists(file_location):
            # Pull the current metadata.json into memory.
            old_bundle_metadata_file = BytesIO()
            s3_client.download_fileobj(self.bucket_name, file_location,
                                       old_bundle_metadata_file)
            bundle_metadata_json = json.loads(old_bundle_metadata_file.getvalue())
            # Flag the deleted file's workflow_outputs entry; entries are
            # matched by their 'file_path' value.
            for wo in bundle_metadata_json["specimen"][0]["samples"][0] \
                    ["analysis"][0]["workflow_outputs"]:
                if file_name == wo['file_path']:
                    wo['is_deleted'] = True
            # NOTE(review): json.dump into a BytesIO only works on Python 2
            # (where str is bytes); confirm before porting to Python 3.
            new_bundle_metadata_file = BytesIO()
            json.dump(bundle_metadata_json, new_bundle_metadata_file)
            s3_client.put_object(Body=new_bundle_metadata_file.getvalue(),
                                 Bucket=self.bucket_name,
                                 Key=file_location)
            # Rewrite the ".meta" listing: the stored MD5s no longer match
            # the edited payload so they are cleared, and both size fields
            # are updated to the new metadata.json length.
            old_endpoint_info_file = BytesIO()
            s3_client.download_fileobj(self.bucket_name, listing_file_location,
                                       old_endpoint_info_file)
            listing_info_json = json.loads(old_endpoint_info_file.getvalue())
            listing_info_json["objectMd5"] = None
            listing_info_json["parts"][0]["sourceMd5"] = None
            bundle_metadata_filesize = len(new_bundle_metadata_file.getvalue())
            listing_info_json["parts"][0]["partSize"] = bundle_metadata_filesize
            listing_info_json["objectSize"] = bundle_metadata_filesize
            new_listing_metadata = json.dumps(listing_info_json)
            s3_client.put_object(Body=new_listing_metadata,
                                 Bucket=self.bucket_name,
                                 Key=listing_file_location)
            # Ask the indexer container to refresh its endpoint metadata.
            # NOTE(review): exec_create only creates the exec instance;
            # docker-py normally needs exec_start to actually run it — verify.
            client = docker.APIClient()
            client.exec_create(defaults.INDEXER_CONTAINER,
                               ['bash', 'update_endpoint_metadata.sh',
                                metadata_file_uuid])
        else:
            raise RedwoodFileNotFoundError(metadata_file_uuid)
def run_delete_file_cli(deleter, object_uuid, skip_prompt,
                        will_delete_bundle=False):
    """
    The command interface for deleting a file in AWS S3 Buckets.

    Parameters
    ----------
    deleter: RedwoodAdminDeleter
        The object that manages file deletion
    object_uuid
        The file_name of the file targeted for deletion
    skip_prompt
        If True, the user is not asked to confirm the deletion
    will_delete_bundle
        If True, the confirmation message changes and the deleter removes
        every file in the bundle instead of a single file
    """
    confirmed = skip_prompt
    if not skip_prompt:
        prompt_obj_str = "EVERY FILE IN BUNDLE" if will_delete_bundle else "File"
        # raw_input: this script targets Python 2.
        answer = raw_input("Are you sure you want to delete {} {}?"
                           " [Y]es/[N]o ".format(prompt_obj_str, object_uuid))
        confirmed = answer.lower() in {'y', 'yes'}
    if not confirmed:
        logger.info("DID NOT delete File {}.".format(object_uuid))
        return
    if will_delete_bundle:
        try:
            deleter.delete_files_in_bundle(object_uuid)
        except (RedwoodDeleteError, RedwoodFileNotFoundError) as e:
            logger.error(str(e))
            logger.error("Deletion Failed")
        else:
            logger.info("Successfully Deleted "
                        "All Files from Bundle {}.".format(object_uuid))
    else:
        try:
            deleter.delete_file(object_uuid)
        except (RedwoodDeleteError, RedwoodFileNotFoundError) as e:
            logger.error(str(e))
            logger.error("Deletion Failed")
        else:
            logger.info("Successfully deleted File {}.".format(object_uuid))
def run_cli():
    """
    Initiates the command line interface for admin delete.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', "--skip-prompt",
                        help='Skips Confirmation Prompt.',
                        action="store_true")
    parser.add_argument("--ignore-errors",
                        help='Prevents most errors from interrupting the'
                             'deletion process',
                        action="store_true")
    parser.add_argument('FILE_UUID',
                        help='The file uuid of the file that will be'
                             ' deleted.')
    parser.add_argument("--delete-bundle", action='store_true')
    args = parser.parse_args()
    dccops_env_vars = DCCOpsEnv(defaults.DCCOPS_DEFAULT_LOCATION)
    if os.getuid() != 0:
        # Deletion touches root-owned resources; refuse non-root runs.
        logger.error("Please run this script as root.")
        return
    try:
        deleter = RedwoodAdminDeleter(dccops_env_vars,
                                      ignore_errors=args.ignore_errors)
    except ICDCDBadAWSKeys as e:
        logger.error(str(e))
        logger.error("Please check if your AWS keys are correct.")
    else:
        run_delete_file_cli(deleter, args.FILE_UUID, args.skip_prompt,
                            args.delete_bundle)
# Allow the module to be executed directly as a command-line script.
if __name__ == '__main__':
    run_cli()
| []
| []
| [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
]
| [] | ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | python | 2 | 0 | |
integration/util.go | package main
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/cortexproject/cortex/integration/e2e"
e2edb "github.com/cortexproject/cortex/integration/e2e/db"
)
var (
	// Expose some utilities from the framework so that we don't have to prefix them
	// with the package name in tests.
	mergeFlags = e2e.MergeFlags
	newDynamoClient = e2edb.NewDynamoClient
	generateSeries = e2e.GenerateSeries
)
// getCortexProjectDir returns the filesystem path of the Cortex checkout,
// honouring the CORTEX_CHECKOUT_DIR override before falling back to the
// conventional GOPATH location.
func getCortexProjectDir() string {
	dir := os.Getenv("CORTEX_CHECKOUT_DIR")
	if dir == "" {
		dir = os.Getenv("GOPATH") + "/src/github.com/cortexproject/cortex"
	}
	return dir
}
// writeFileToSharedDir writes content to dst inside the scenario's shared
// directory, world-readable/writable so containers can access it.
func writeFileToSharedDir(s *e2e.Scenario, dst string, content []byte) error {
	target := filepath.Join(s.SharedDir(), dst)
	return ioutil.WriteFile(target, content, os.ModePerm)
}
// copyFileToSharedDir copies src (a path relative to the Cortex checkout)
// into the scenario's shared directory under dst.
func copyFileToSharedDir(s *e2e.Scenario, src, dst string) error {
	data, err := ioutil.ReadFile(filepath.Join(getCortexProjectDir(), src))
	if err != nil {
		return errors.Wrapf(err, "unable to read local file %s", src)
	}
	return writeFileToSharedDir(s, dst, data)
}
| [
"\"CORTEX_CHECKOUT_DIR\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"CORTEX_CHECKOUT_DIR"
]
| [] | ["GOPATH", "CORTEX_CHECKOUT_DIR"] | go | 2 | 0 | |
storage/storage.go | // Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
gapic "cloud.google.com/go/storage/internal/apiv2"
"github.com/googleapis/gax-go/v2"
"golang.org/x/oauth2/google"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
raw "google.golang.org/api/storage/v1"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/timestamppb"
)
// Methods which can be used in signed URLs.
var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true}

var (
	// ErrBucketNotExist indicates that the bucket does not exist.
	ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
	// ErrObjectNotExist indicates that the object does not exist.
	ErrObjectNotExist = errors.New("storage: object doesn't exist")
	// errMethodNotValid indicates that given HTTP method is not valid.
	errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)

// userAgent identifies this library (and its version) in HTTP requests.
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo)

const (
	// ScopeFullControl grants permissions to manage your
	// data and permissions in Google Cloud Storage.
	ScopeFullControl = raw.DevstorageFullControlScope
	// ScopeReadOnly grants permissions to
	// view your data in Google Cloud Storage.
	ScopeReadOnly = raw.DevstorageReadOnlyScope
	// ScopeReadWrite grants permissions to manage your
	// data in Google Cloud Storage.
	ScopeReadWrite = raw.DevstorageReadWriteScope
	// defaultConnPoolSize is the default number of connections
	// to initialize in the GAPIC gRPC connection pool. A larger
	// connection pool may be necessary for jobs that require
	// high throughput and/or leverage many concurrent streams.
	defaultConnPoolSize = 4
)
// xGoogHeader is the value sent in the x-goog-api-client header, identifying
// the Go runtime version and this library's version.
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

// setClientHeader stamps the standard x-goog-api-client header onto headers.
func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}
// Client is a client for interacting with Google Cloud Storage.
//
// Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
type Client struct {
	// hc is the HTTP client used for request bodies and media.
	hc *http.Client
	// raw is the generated JSON API service built on hc.
	raw *raw.Service
	// Scheme describes the scheme under the current host.
	scheme string
	// ReadHost is the default host used on the reader.
	readHost string
	// May be nil.
	creds *google.Credentials
	retry *retryConfig
	// gc is an optional gRPC-based, GAPIC client.
	//
	// This is an experimental field and not intended for public use.
	gc *gapic.Client
}
// NewClient creates a new Google Cloud Storage client.
// The default scope is ScopeFullControl. To use a different scope, like
// ScopeReadOnly, use option.WithScopes.
//
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	var creds *google.Credentials
	// In general, it is recommended to use raw.NewService instead of htransport.NewClient
	// since raw.NewService configures the correct default endpoints when initializing the
	// internal http client. However, in our case, "NewRangeReader" in reader.go needs to
	// access the http client directly to make requests, so we create the client manually
	// here so it can be re-used by both reader.go and raw.NewService. This means we need to
	// manually configure the default endpoint options on the http client. Furthermore, we
	// need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.
	if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
		// Production path: no emulator configured.
		// Prepend default options to avoid overriding options passed by the user.
		opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...)
		opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
		opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
		// Don't error out here. The user may have passed in their own HTTP
		// client which does not auth with ADC or other common conventions.
		c, err := transport.Creds(ctx, opts...)
		if err == nil {
			creds = c
			opts = append(opts, internaloption.WithCredentials(creds))
		}
	} else {
		// Emulator path: point the client at the emulator, no authentication.
		var hostURL *url.URL
		if strings.Contains(host, "://") {
			h, err := url.Parse(host)
			if err != nil {
				return nil, err
			}
			hostURL = h
		} else {
			// Add scheme for user if not supplied in STORAGE_EMULATOR_HOST
			// URL is only parsed correctly if it has a scheme, so we build it ourselves
			hostURL = &url.URL{Scheme: "http", Host: host}
		}
		hostURL.Path = "storage/v1/"
		endpoint := hostURL.String()
		// Append the emulator host as default endpoint for the user
		opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...)
		opts = append(opts, internaloption.WithDefaultEndpoint(endpoint))
		opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint))
	}
	// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
	hc, ep, err := htransport.NewClient(ctx, opts...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	// RawService should be created with the chosen endpoint to take account of user override.
	rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
	if err != nil {
		return nil, fmt.Errorf("storage client: %v", err)
	}
	// Update readHost and scheme with the chosen endpoint.
	u, err := url.Parse(ep)
	if err != nil {
		return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
	}
	return &Client{
		hc: hc,
		raw: rawService,
		scheme: u.Scheme,
		readHost: u.Host,
		creds: creds,
	}, nil
}
// hybridClientOptions carries the set of client options for HTTP and gRPC clients.
type hybridClientOptions struct {
	// HTTPOpts configure the JSON/HTTP client created by NewClient.
	HTTPOpts []option.ClientOption
	// GRPCOpts configure the GAPIC gRPC client used for media operations.
	GRPCOpts []option.ClientOption
}
// newHybridClient creates a new Storage client that initializes a gRPC-based
// client for media upload and download operations in addition to the
// standard HTTP client.
//
// This is an experimental API and not intended for public use.
func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, error) {
	if opts == nil {
		opts = &hybridClientOptions{}
	}
	// Defaults go first so user-supplied options can override them.
	opts.GRPCOpts = append(defaultGRPCOptions(), opts.GRPCOpts...)
	httpClient, err := NewClient(ctx, opts.HTTPOpts...)
	if err != nil {
		return nil, err
	}
	grpcClient, err := gapic.NewClient(ctx, opts.GRPCOpts...)
	if err != nil {
		return nil, err
	}
	httpClient.gc = grpcClient
	return httpClient, nil
}
// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
func defaultGRPCOptions() []option.ClientOption {
	return []option.ClientOption{
		// See defaultConnPoolSize for the rationale behind the pool size.
		option.WithGRPCConnectionPool(defaultConnPoolSize),
	}
}
// Close closes the Client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
	// Nil out the fields so any use after Close panics loudly.
	c.hc = nil
	c.raw = nil
	c.creds = nil
	if c.gc == nil {
		return nil
	}
	// Only the gRPC client holds resources that need explicit teardown.
	return c.gc.Close()
}
// SigningScheme determines the API version to use when signing URLs.
type SigningScheme int

const (
	// SigningSchemeDefault is presently V2 and will change to V4 in the future.
	SigningSchemeDefault SigningScheme = iota
	// SigningSchemeV2 uses the V2 scheme to sign URLs.
	SigningSchemeV2
	// SigningSchemeV4 uses the V4 scheme to sign URLs.
	SigningSchemeV4
)
// URLStyle determines the style to use for the signed URL. pathStyle is the
// default. All non-default options work with V4 scheme only. See
// https://cloud.google.com/storage/docs/request-endpoints for details.
type URLStyle interface {
	// host should return the host portion of the signed URL, not including
	// the scheme (e.g. storage.googleapis.com).
	host(bucket string) string
	// path should return the path portion of the signed URL, which may include
	// both the bucket and object name or only the object name depending on the
	// style.
	path(bucket, object string) string
}
// pathStyle addresses objects as storage.googleapis.com/<bucket>/<object>.
type pathStyle struct{}

func (s pathStyle) host(bucket string) string {
	return "storage.googleapis.com"
}

func (s pathStyle) path(bucket, object string) string {
	if object == "" {
		return bucket
	}
	return bucket + "/" + object
}

// virtualHostedStyle addresses objects relative to the bucket's virtual
// hostname: <bucket>.storage.googleapis.com/<object>.
type virtualHostedStyle struct{}

func (s virtualHostedStyle) host(bucket string) string {
	return bucket + ".storage.googleapis.com"
}

func (s virtualHostedStyle) path(bucket, object string) string {
	return object
}

// bucketBoundHostname addresses objects via a custom hostname tied to a
// specific bucket: <hostname>/<object>.
type bucketBoundHostname struct {
	hostname string
}

func (s bucketBoundHostname) host(bucket string) string {
	return s.hostname
}

func (s bucketBoundHostname) path(bucket, object string) string {
	return object
}
// PathStyle is the default style, and will generate a URL of the form
// "storage.googleapis.com/<bucket-name>/<object-name>".
func PathStyle() URLStyle {
	// pathStyle is stateless, so a fresh value is always safe to return.
	return pathStyle{}
}

// VirtualHostedStyle generates a URL relative to the bucket's virtual
// hostname, e.g. "<bucket-name>.storage.googleapis.com/<object-name>".
func VirtualHostedStyle() URLStyle {
	return virtualHostedStyle{}
}

// BucketBoundHostname generates a URL with a custom hostname tied to a
// specific GCS bucket. The desired hostname should be passed in using the
// hostname argument. Generated urls will be of the form
// "<bucket-bound-hostname>/<object-name>". See
// https://cloud.google.com/storage/docs/request-endpoints#cname and
// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers
// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must
// be set to true.
func BucketBoundHostname(hostname string) URLStyle {
	return bucketBoundHostname{hostname: hostname}
}
// SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct {
	// GoogleAccessID represents the authorizer of the signed URL generation.
	// It is typically the Google service account client email address from
	// the Google Developers Console in the form of "[email protected]".
	// Required.
	GoogleAccessID string
	// PrivateKey is the Google service account private key. It is obtainable
	// from the Google Developers Console.
	// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
	// create a service account client ID or reuse one of your existing service account
	// credentials. Click on the "Generate new P12 key" to generate and download
	// a new private key. Once you download the P12 file, use the following command
	// to convert it into a PEM file.
	//
	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
	//
	// Provide the contents of the PEM file as a byte slice.
	// Exactly one of PrivateKey or SignBytes must be non-nil.
	PrivateKey []byte
	// SignBytes is a function for implementing custom signing. For example, if
	// your application is running on Google App Engine, you can use
	// appengine's internal signing function:
	//     ctx := appengine.NewContext(request)
	//     acc, _ := appengine.ServiceAccount(ctx)
	//     url, err := SignedURL("bucket", "object", &SignedURLOptions{
	//     	GoogleAccessID: acc,
	//     	SignBytes: func(b []byte) ([]byte, error) {
	//     		_, signedBytes, err := appengine.SignBytes(ctx, b)
	//     		return signedBytes, err
	//     	},
	//     	// etc.
	//     })
	//
	// Exactly one of PrivateKey or SignBytes must be non-nil.
	SignBytes func([]byte) ([]byte, error)
	// Method is the HTTP method to be used with the signed URL.
	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
	// Required.
	Method string
	// Expires is the expiration time on the signed URL. It must be
	// a datetime in the future. For SigningSchemeV4, the expiration may be no
	// more than seven days in the future.
	// Required.
	Expires time.Time
	// ContentType is the content type header the client must provide
	// to use the generated signed URL.
	// Optional.
	ContentType string
	// Headers is a list of extension headers the client must provide
	// in order to use the generated signed URL. Each must be a string of the
	// form "key:values", with multiple values separated by a semicolon.
	// Headers are sanitized per the chosen signing scheme before signing.
	// Optional.
	Headers []string
	// QueryParameters is a map of additional query parameters. When
	// SigningScheme is V4, this is used in computing the signature, and the
	// client must use the same query parameters when using the generated signed
	// URL.
	// Optional.
	QueryParameters url.Values
	// MD5 is the base64 encoded MD5 checksum of the file.
	// If provided, the client should provide the exact value on the request
	// header in order to use the signed URL.
	// Optional.
	MD5 string
	// Style provides options for the type of URL to use. Options are
	// PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See
	// https://cloud.google.com/storage/docs/request-endpoints for details.
	// Only supported for V4 signing.
	// Optional.
	Style URLStyle
	// Insecure determines whether the signed URL should use HTTPS (default) or
	// HTTP.
	// Only supported for V4 signing.
	// Optional.
	Insecure bool
	// Scheme determines the version of URL signing to use. Default is
	// SigningSchemeV2. SigningSchemeDefault currently selects the V2
	// algorithm (see SignedURL).
	Scheme SigningScheme
}
// clone returns a shallow copy of opts. Slice, map, and function fields
// still alias the originals.
func (opts *SignedURLOptions) clone() *SignedURLOptions {
	c := *opts
	return &c
}
var (
	tabRegex = regexp.MustCompile(`[\t]+`)
	// I was tempted to call this spacex. :)
	spaceRegex = regexp.MustCompile(` +`)

	canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
	excludedCanonicalHeaders = map[string]bool{
		"x-goog-encryption-key": true,
		"x-goog-encryption-key-sha256": true,
	}
)

// v2SanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers
func v2SanitizeHeaders(hdrs []string) []string {
	collected := map[string][]string{}
	for _, rawHdr := range hdrs {
		// Only canonical x-goog-* headers matter for V2; anything else
		// (including deliberately excluded encryption headers) is dropped.
		m := canonicalHeaderRegexp.FindStringSubmatch(strings.TrimSpace(rawHdr))
		if len(m) == 0 {
			continue
		}
		name := strings.ToLower(strings.TrimSpace(m[1]))
		value := strings.TrimSpace(m[2])
		if excludedCanonicalHeaders[name] {
			continue
		}
		if len(value) == 0 {
			// Headers with empty values are edge cases per the spec and
			// are discarded.
			continue
		}
		// Duplicate headers accumulate their values in appearance order.
		collected[name] = append(collected[name], value)
	}
	// Render "name:v1,v2" with no spaces around the colon or values, then
	// sort the whole list.
	var sanitized []string
	for name, values := range collected {
		sanitized = append(sanitized, name+":"+strings.Join(values, ","))
	}
	sort.Strings(sanitized)
	return sanitized
}
// v4SanitizeHeaders applies the specifications for canonical extension headers
// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers.
//
// V4 does a couple things differently from V2:
// - Headers get sorted by key, instead of by key:value. We do this in
//   signedURLV4.
// - There's no canonical regexp: we simply split headers on :.
// - We don't exclude canonical headers.
// - We replace leading and trailing spaces in header values, like v2, but also
//   all intermediate space duplicates get stripped. That is, there's only ever
//   a single consecutive space.
func v4SanitizeHeaders(hdrs []string) []string {
	headerMap := map[string][]string{}
	for _, hdr := range hdrs {
		// No leading or trailing whitespaces.
		sanitizedHeader := strings.TrimSpace(hdr)
		// Split on the first colon only, so header values that themselves
		// contain colons (e.g. "host:localhost:8080") are kept intact.
		// A plain Split + [1] would silently truncate such values.
		nameValue := strings.SplitN(sanitizedHeader, ":", 2)
		if len(nameValue) < 2 {
			continue
		}
		key := strings.ToLower(strings.TrimSpace(nameValue[0]))
		value := strings.TrimSpace(nameValue[1])
		// Collapse internal runs of spaces/tabs to a single separator.
		value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" ")))
		value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t")))
		if len(value) > 0 {
			// Remove duplicate headers by appending the values of duplicates
			// in their order of appearance.
			headerMap[key] = append(headerMap[key], value)
		}
	}
	var sanitizedHeaders []string
	for header, values := range headerMap {
		// There should be no spaces around the colon separating the header name
		// from the header value or around the values themselves. The values
		// should be separated by commas.
		sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
	}
	return sanitizedHeaders
}
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
// access to a restricted resource for a limited time without needing a
// Google account or signing in. For more information about signed URLs, see
// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) {
	now := utcNow()
	if err := validateOptions(opts, now); err != nil {
		return "", err
	}
	if opts.Scheme == SigningSchemeV4 {
		opts.Headers = v4SanitizeHeaders(opts.Headers)
		return signedURLV4(bucket, object, opts, now)
	}
	// SigningSchemeV2 and SigningSchemeDefault both use the V2 algorithm.
	opts.Headers = v2SanitizeHeaders(opts.Headers)
	return signedURLV2(bucket, object, opts)
}
// validateOptions checks opts for missing or invalid fields and applies
// defaults. Note that it mutates opts: Method is uppercased and a nil Style
// is replaced with PathStyle. Errors are reported in declaration order, so
// the first failing check determines the returned error.
func validateOptions(opts *SignedURLOptions, now time.Time) error {
	if opts == nil {
		return errors.New("storage: missing required SignedURLOptions")
	}
	if opts.GoogleAccessID == "" {
		return errors.New("storage: missing required GoogleAccessID")
	}
	// Exactly one signing mechanism must be supplied.
	if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
		return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
	}
	opts.Method = strings.ToUpper(opts.Method)
	if _, ok := signedURLMethods[opts.Method]; !ok {
		return errMethodNotValid
	}
	if opts.Expires.IsZero() {
		return errors.New("storage: missing required expires option")
	}
	if opts.MD5 != "" {
		// MD5 must be the base64 encoding of exactly 16 raw bytes.
		md5, err := base64.StdEncoding.DecodeString(opts.MD5)
		if err != nil || len(md5) != 16 {
			return errors.New("storage: invalid MD5 checksum")
		}
	}
	if opts.Style == nil {
		opts.Style = PathStyle()
	}
	// Non-path styles are a V4-only feature.
	if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 {
		return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2")
	}
	if opts.Scheme == SigningSchemeV4 {
		cutoff := now.Add(604801 * time.Second) // 7 days + 1 second
		if !opts.Expires.Before(cutoff) {
			return errors.New("storage: expires must be within seven days from now")
		}
	}
	return nil
}
const (
	// iso8601 is the timestamp layout required by the V4 signing protocol.
	iso8601 = "20060102T150405Z"
	// yearMonthDay is the date layout used in the V4 credential scope.
	yearMonthDay = "20060102"
)

// utcNow returns the current time in UTC and is a variable to allow for
// reassignment in tests to provide deterministic signed URL values.
var utcNow = func() time.Time {
	return time.Now().UTC()
}
// extractHeaderNames takes in a series of key:value headers and returns
// only the header names, in input order.
func extractHeaderNames(kvs []string) []string {
	var names []string
	for _, kv := range kvs {
		// Only the text before the first colon is the name; a header with
		// no colon is returned as-is.
		name := strings.SplitN(kv, ":", 2)[0]
		names = append(names, name)
	}
	return names
}
// pathEncodeV4 creates an encoded string that matches the v4 signature spec.
// Following the spec precisely is necessary in order to ensure that the URL
// and signing string are correctly formed, and Go's url.PathEncode and
// url.QueryEncode don't generate an exact match without some additional logic.
func pathEncodeV4(path string) string {
	segments := strings.Split(path, "/")
	for i, seg := range segments {
		// QueryEscape turns spaces into "+", but the spec wants "%20".
		segments[i] = strings.Replace(url.QueryEscape(seg), "+", "%20", -1)
	}
	return strings.Join(segments, "/")
}
// signedURLV4 creates a signed URL using the sigV4 algorithm. It builds the
// canonical request into buf, hashes it, signs the resulting string-to-sign,
// and attaches the signature as the X-Goog-Signature query parameter.
func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) {
	// buf accumulates the canonical request; every byte written here is
	// covered by the signature.
	buf := &bytes.Buffer{}
	fmt.Fprintf(buf, "%s\n", opts.Method)
	u := &url.URL{Path: opts.Style.path(bucket, name)}
	u.RawPath = pathEncodeV4(u.Path)
	// Note: we have to add a / here because GCS does so auto-magically, despite
	// our encoding not doing so (and we have to exactly match their
	// canonical query).
	fmt.Fprintf(buf, "/%s\n", u.RawPath)
	// The "host" header is always signed; content-type and content-md5 are
	// signed only when the caller supplied them.
	headerNames := append(extractHeaderNames(opts.Headers), "host")
	if opts.ContentType != "" {
		headerNames = append(headerNames, "content-type")
	}
	if opts.MD5 != "" {
		headerNames = append(headerNames, "content-md5")
	}
	sort.Strings(headerNames)
	signedHeaders := strings.Join(headerNames, ";")
	timestamp := now.Format(iso8601)
	credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay))
	canonicalQueryString := url.Values{
		"X-Goog-Algorithm": {"GOOG4-RSA-SHA256"},
		"X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)},
		"X-Goog-Date": {timestamp},
		"X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))},
		"X-Goog-SignedHeaders": {signedHeaders},
	}
	// Add user-supplied query parameters to the canonical query string. For V4,
	// it's necessary to include these.
	for k, v := range opts.QueryParameters {
		canonicalQueryString[k] = append(canonicalQueryString[k], v...)
	}
	// url.Values.Encode escaping is correct, except that a space must be replaced
	// by `%20` rather than `+`.
	escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1)
	fmt.Fprintf(buf, "%s\n", escapedQuery)
	// Fill in the hostname based on the desired URL style.
	u.Host = opts.Style.host(bucket)
	// Fill in the URL scheme.
	if opts.Insecure {
		u.Scheme = "http"
	} else {
		u.Scheme = "https"
	}
	var headersWithValue []string
	headersWithValue = append(headersWithValue, "host:"+u.Host)
	headersWithValue = append(headersWithValue, opts.Headers...)
	if opts.ContentType != "" {
		headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType)
	}
	if opts.MD5 != "" {
		headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5)
	}
	// Trim extra whitespace from headers and replace with a single space.
	var trimmedHeaders []string
	for _, h := range headersWithValue {
		trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " "))
	}
	canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n")
	fmt.Fprintf(buf, "%s\n\n", canonicalHeaders)
	fmt.Fprintf(buf, "%s\n", signedHeaders)
	// If the user provides a value for X-Goog-Content-SHA256, we must use
	// that value in the request string. If not, we use UNSIGNED-PAYLOAD.
	sha256Header := false
	for _, h := range trimmedHeaders {
		if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") {
			sha256Header = true
			fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1])
			break
		}
	}
	if !sha256Header {
		fmt.Fprint(buf, "UNSIGNED-PAYLOAD")
	}
	// Hash the canonical request and assemble the string-to-sign.
	sum := sha256.Sum256(buf.Bytes())
	hexDigest := hex.EncodeToString(sum[:])
	signBuf := &bytes.Buffer{}
	fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n")
	fmt.Fprintf(signBuf, "%s\n", timestamp)
	fmt.Fprintf(signBuf, "%s\n", credentialScope)
	fmt.Fprintf(signBuf, "%s", hexDigest)
	// Sign with the caller's SignBytes, or with RSA-SHA256 over the
	// parsed PrivateKey when one was provided.
	signBytes := opts.SignBytes
	if opts.PrivateKey != nil {
		key, err := parseKey(opts.PrivateKey)
		if err != nil {
			return "", err
		}
		signBytes = func(b []byte) ([]byte, error) {
			sum := sha256.Sum256(b)
			return rsa.SignPKCS1v15(
				rand.Reader,
				key,
				crypto.SHA256,
				sum[:],
			)
		}
	}
	b, err := signBytes(signBuf.Bytes())
	if err != nil {
		return "", err
	}
	signature := hex.EncodeToString(b)
	canonicalQueryString.Set("X-Goog-Signature", string(signature))
	u.RawQuery = canonicalQueryString.Encode()
	return u.String(), nil
}
// sortHeadersByKey takes a list of headerKey:headervalue1,headervalue2,etc
// strings and sorts them lexicographically by header key.
//
// Each entry is split on the first ":" only, so values that themselves
// contain ":" (for example URLs in custom metadata headers) are preserved
// verbatim. An entry with no ":" at all is treated as a key with an empty
// value rather than panicking.
func sortHeadersByKey(hdrs []string) []string {
	headersMap := map[string]string{}
	var headersKeys []string
	for _, h := range hdrs {
		// SplitN keeps any ":" inside the value intact; a plain Split
		// followed by parts[1] would truncate values like
		// "x-goog-meta-url:https://example.com".
		parts := strings.SplitN(h, ":", 2)
		k := parts[0]
		var v string
		if len(parts) == 2 {
			v = parts[1]
		}
		headersMap[k] = v
		headersKeys = append(headersKeys, k)
	}
	sort.Strings(headersKeys)
	var sorted []string
	for _, k := range headersKeys {
		v := headersMap[k]
		sorted = append(sorted, fmt.Sprintf("%s:%s", k, v))
	}
	return sorted
}
// signedURLV2 creates a signed URL for the given object using the legacy V2
// signing scheme: an RSA-SHA256 signature over a newline-separated string of
// method, MD5, content type, expiry and resource path, attached to the URL as
// the base64-encoded Signature query parameter.
func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
	signBytes := opts.SignBytes
	if opts.PrivateKey != nil {
		// A provided private key takes precedence over opts.SignBytes:
		// build a SHA256 + PKCS#1 v1.5 signer from it.
		key, err := parseKey(opts.PrivateKey)
		if err != nil {
			return "", err
		}
		signBytes = func(b []byte) ([]byte, error) {
			sum := sha256.Sum256(b)
			return rsa.SignPKCS1v15(
				rand.Reader,
				key,
				crypto.SHA256,
				sum[:],
			)
		}
	}
	// The resource path "/bucket/object" is part of the signed payload.
	u := &url.URL{
		Path: fmt.Sprintf("/%s/%s", bucket, name),
	}
	// Assemble the string-to-sign. The exact line order below is mandated
	// by the V2 signing protocol and must not be changed.
	buf := &bytes.Buffer{}
	fmt.Fprintf(buf, "%s\n", opts.Method)
	fmt.Fprintf(buf, "%s\n", opts.MD5)
	fmt.Fprintf(buf, "%s\n", opts.ContentType)
	fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
	if len(opts.Headers) > 0 {
		fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
	}
	fmt.Fprintf(buf, "%s", u.String())
	b, err := signBytes(buf.Bytes())
	if err != nil {
		return "", err
	}
	encoded := base64.StdEncoding.EncodeToString(b)
	// Attach scheme/host and the authentication query parameters.
	u.Scheme = "https"
	u.Host = "storage.googleapis.com"
	q := u.Query()
	q.Set("GoogleAccessId", opts.GoogleAccessID)
	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
	q.Set("Signature", string(encoded))
	u.RawQuery = q.Encode()
	return u.String(), nil
}
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
//
// Methods such as Generation, If, Key and Retryer return a shallow copy with
// the corresponding field set, so a handle can be shared and specialized
// without mutating the original.
type ObjectHandle struct {
	c      *Client
	bucket string
	object string
	acl    ACLHandle
	gen    int64 // a negative value indicates latest
	conds  *Conditions
	encryptionKey []byte // AES-256 key
	userProject   string // for requester-pays buckets
	readCompressed bool  // Accept-Encoding: gzip
	retry          *retryConfig
}
// ACL provides access to the object's access control list.
// This controls who can read and write this object.
// This call does not perform any network operations.
//
// The returned handle points into this ObjectHandle's own ACLHandle.
func (o *ObjectHandle) ACL() *ACLHandle {
	return &o.acl
}
// Generation returns a new ObjectHandle that operates on a specific generation
// of the object. By default, the handle operates on the latest generation.
//
// Not all operations work when given a specific generation; check the API
// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
// The receiver is not modified; a copy with the generation set is returned.
func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
	handle := *o
	handle.gen = gen
	return &handle
}
// If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored. Operations on
// the new handle will return an error if the preconditions are not satisfied.
//
// See https://cloud.google.com/storage/docs/generations-preconditions
// for more details. The receiver itself is left unchanged.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
	handle := *o
	handle.conds = &conds
	return &handle
}
// Key returns a new ObjectHandle that uses the supplied encryption
// key to encrypt and decrypt the object's contents.
//
// Encryption key must be a 32-byte AES-256 key.
// See https://cloud.google.com/storage/docs/encryption for details.
// The receiver itself is left unchanged.
func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
	handle := *o
	handle.encryptionKey = encryptionKey
	return &handle
}
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
	defer func() { trace.EndSpan(ctx, err) }()
	if err := o.validate(); err != nil {
		return nil, err
	}
	// Request the "full" projection so ACLs and all metadata are populated.
	call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
	if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
		return nil, err
	}
	if o.userProject != "" {
		call.UserProject(o.userProject)
	}
	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
		return nil, err
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	// The final argument marks the call idempotent: a metadata read is
	// always safe to retry (contrast with Update, which computes this).
	err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true)
	// A 404 from the service is surfaced as the package sentinel error.
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return nil, ErrObjectNotExist
	}
	if err != nil {
		return nil, err
	}
	return newObject(obj), nil
}
// Update updates an object with the provided attributes. See
// ObjectAttrsToUpdate docs for details on treatment of zero values.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
	defer func() { trace.EndSpan(ctx, err) }()
	if err := o.validate(); err != nil {
		return nil, err
	}
	var attrs ObjectAttrs
	// Lists of fields to send, and set to null, in the JSON.
	// ForceSendFields makes the JSON encoder emit a field even when it holds
	// its zero value; NullFields emits an explicit JSON null, which the
	// service interprets as "delete this attribute".
	var forceSendFields, nullFields []string
	if uattrs.ContentType != nil {
		attrs.ContentType = optional.ToString(uattrs.ContentType)
		// For ContentType, sending the empty string is a no-op.
		// Instead we send a null.
		if attrs.ContentType == "" {
			nullFields = append(nullFields, "ContentType")
		} else {
			forceSendFields = append(forceSendFields, "ContentType")
		}
	}
	if uattrs.ContentLanguage != nil {
		attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
		// For ContentLanguage it's an error to send the empty string.
		// Instead we send a null.
		if attrs.ContentLanguage == "" {
			nullFields = append(nullFields, "ContentLanguage")
		} else {
			forceSendFields = append(forceSendFields, "ContentLanguage")
		}
	}
	if uattrs.ContentEncoding != nil {
		attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
		forceSendFields = append(forceSendFields, "ContentEncoding")
	}
	if uattrs.ContentDisposition != nil {
		attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
		forceSendFields = append(forceSendFields, "ContentDisposition")
	}
	if uattrs.CacheControl != nil {
		attrs.CacheControl = optional.ToString(uattrs.CacheControl)
		forceSendFields = append(forceSendFields, "CacheControl")
	}
	if uattrs.EventBasedHold != nil {
		attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
		forceSendFields = append(forceSendFields, "EventBasedHold")
	}
	if uattrs.TemporaryHold != nil {
		attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
		forceSendFields = append(forceSendFields, "TemporaryHold")
	}
	// CustomTime has no "unset" pointer form; the zero time means "not
	// provided" (it cannot be deleted once set; see ObjectAttrsToUpdate).
	if !uattrs.CustomTime.IsZero() {
		attrs.CustomTime = uattrs.CustomTime
		forceSendFields = append(forceSendFields, "CustomTime")
	}
	if uattrs.Metadata != nil {
		attrs.Metadata = uattrs.Metadata
		if len(attrs.Metadata) == 0 {
			// Sending the empty map is a no-op. We send null instead.
			nullFields = append(nullFields, "Metadata")
		} else {
			forceSendFields = append(forceSendFields, "Metadata")
		}
	}
	if uattrs.ACL != nil {
		attrs.ACL = uattrs.ACL
		// It's an error to attempt to delete the ACL, so
		// we don't append to nullFields here.
		forceSendFields = append(forceSendFields, "Acl")
	}
	rawObj := attrs.toRawObject(o.bucket)
	rawObj.ForceSendFields = forceSendFields
	rawObj.NullFields = nullFields
	call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
	if err := applyConds("Update", o.gen, o.conds, call); err != nil {
		return nil, err
	}
	if o.userProject != "" {
		call.UserProject(o.userProject)
	}
	if uattrs.PredefinedACL != "" {
		call.PredefinedAcl(uattrs.PredefinedACL)
	}
	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
		return nil, err
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	// A Patch is only safe to retry when guarded by a metageneration-match
	// precondition: a blind retry could otherwise apply the update twice
	// over an intervening change.
	var isIdempotent bool
	if o.conds != nil && o.conds.MetagenerationMatch != 0 {
		isIdempotent = true
	}
	err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent)
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return nil, ErrObjectNotExist
	}
	if err != nil {
		return nil, err
	}
	return newObject(obj), nil
}
// BucketName returns the name of the bucket this handle refers to.
func (o *ObjectHandle) BucketName() string {
	return o.bucket
}
// ObjectName returns the name of the object this handle refers to.
func (o *ObjectHandle) ObjectName() string {
	return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// For all fields except CustomTime, set the field to its zero value to delete
// it. CustomTime cannot be deleted or changed to an earlier time once set.
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
//    ObjectAttrsToUpdate{
//        ContentType: "text/html",
//        ContentEncoding: "",
//        Metadata: map[string]string{},
//    }
type ObjectAttrsToUpdate struct {
	EventBasedHold     optional.Bool
	TemporaryHold      optional.Bool
	ContentType        optional.String
	ContentLanguage    optional.String
	ContentEncoding    optional.String
	ContentDisposition optional.String
	CacheControl       optional.String
	CustomTime         time.Time         // Cannot be deleted or backdated from its current value.
	Metadata           map[string]string // Set to map[string]string{} to delete.
	ACL                []ACLRule
	// If not empty, applies a predefined set of access controls. ACL must be nil.
	// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
	PredefinedACL string
}
// Delete deletes the single specified object.
// ErrObjectNotExist is returned if the object is not found.
func (o *ObjectHandle) Delete(ctx context.Context) error {
	if err := o.validate(); err != nil {
		return err
	}
	call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
	if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
		return err
	}
	if o.userProject != "" {
		call.UserProject(o.userProject)
	}
	// Encryption doesn't apply to Delete.
	setClientHeader(call.Header())
	var isIdempotent bool
	// Delete is idempotent if GenerationMatch or Generation have been passed in.
	// The default generation is negative to get the latest version of the object.
	if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
		isIdempotent = true
	}
	err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent)
	// Map the service's 404 onto the package sentinel error.
	var e *googleapi.Error
	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
		return ErrObjectNotExist
	}
	return err
}
// ReadCompressed returns a new ObjectHandle; when compressed is true, reads
// through that handle happen without decompressing the object's content
// (i.e. with Accept-Encoding: gzip). The receiver is left unchanged.
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
	handle := *o
	handle.readCompressed = compressed
	return &handle
}
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
	return &Writer{
		ctx:         ctx,
		o:           o,
		donec:       make(chan struct{}),
		ObjectAttrs: ObjectAttrs{Name: o.object},
		// Uploads are chunked at the library-wide default size unless the
		// caller overrides ChunkSize before the first Write.
		ChunkSize: googleapi.DefaultUploadChunkSize,
	}
}
// validate reports an error if the handle's bucket or object name is unusable:
// either name empty, or an object name that is not valid UTF-8.
func (o *ObjectHandle) validate() error {
	switch {
	case o.bucket == "":
		return errors.New("storage: bucket name is empty")
	case o.object == "":
		return errors.New("storage: object name is empty")
	case !utf8.ValidString(o.object):
		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
	}
	return nil
}
// parseKey converts the binary contents of a private key file to an
// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// not. If so, it extracts the private key from PEM container before
// conversion. It only supports PEM containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, err
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("oauth2: private key is invalid")
}
return parsed, nil
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
// Read-only attributes (Size, Generation, checksums, timestamps, ...) are
// intentionally not copied: the service owns them.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
	// Zero times are mapped to the empty string rather than to the RFC3339
	// rendering of the zero time.
	var ret string
	if !o.RetentionExpirationTime.IsZero() {
		ret = o.RetentionExpirationTime.Format(time.RFC3339)
	}
	var ct string
	if !o.CustomTime.IsZero() {
		ct = o.CustomTime.Format(time.RFC3339)
	}
	return &raw.Object{
		Bucket:                  bucket,
		Name:                    o.Name,
		EventBasedHold:          o.EventBasedHold,
		TemporaryHold:           o.TemporaryHold,
		RetentionExpirationTime: ret,
		ContentType:             o.ContentType,
		ContentEncoding:         o.ContentEncoding,
		ContentLanguage:         o.ContentLanguage,
		CacheControl:            o.CacheControl,
		ContentDisposition:      o.ContentDisposition,
		StorageClass:            o.StorageClass,
		Acl:                     toRawObjectACL(o.ACL),
		Metadata:                o.Metadata,
		CustomTime:              ct,
	}
}
// toProtoObject copies the editable attributes from o to the proto library's Object type.
func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
	checksums := &storagepb.ObjectChecksums{Md5Hash: o.MD5}
	// A zero CRC32C is treated as "not set" and omitted from the checksums.
	if o.CRC32C > 0 {
		checksums.Crc32C = proto.Uint32(o.CRC32C)
	}
	// For now, there are only globally unique buckets, and "_" is the alias
	// project ID for such buckets.
	b = bucketResourceName("_", b)
	return &storagepb.Object{
		Bucket:              b,
		Name:                o.Name,
		EventBasedHold:      proto.Bool(o.EventBasedHold),
		TemporaryHold:       o.TemporaryHold,
		ContentType:         o.ContentType,
		ContentEncoding:     o.ContentEncoding,
		ContentLanguage:     o.ContentLanguage,
		CacheControl:        o.CacheControl,
		ContentDisposition:  o.ContentDisposition,
		StorageClass:        o.StorageClass,
		Acl:                 toProtoObjectACL(o.ACL),
		Metadata:            o.Metadata,
		CreateTime:          toProtoTimestamp(o.Created),
		CustomTime:          toProtoTimestamp(o.CustomTime),
		DeleteTime:          toProtoTimestamp(o.Deleted),
		RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime),
		UpdateTime:          toProtoTimestamp(o.Updated),
		KmsKey:              o.KMSKeyName,
		Generation:          o.Generation,
		Size:                o.Size,
		Checksums:           checksums,
	}
}
// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
	// Bucket is the name of the bucket containing this GCS object.
	// This field is read-only.
	Bucket string
	// Name is the name of the object within the bucket.
	// This field is read-only.
	Name string
	// ContentType is the MIME type of the object's content.
	ContentType string
	// ContentLanguage is the content language of the object's content.
	ContentLanguage string
	// CacheControl is the Cache-Control header to be sent in the response
	// headers when serving the object data.
	CacheControl string
	// EventBasedHold specifies whether an object is under event-based hold. New
	// objects created in a bucket whose DefaultEventBasedHold is set will
	// default to that value.
	EventBasedHold bool
	// TemporaryHold specifies whether an object is under temporary hold. While
	// this flag is set to true, the object is protected against deletion and
	// overwrites.
	TemporaryHold bool
	// RetentionExpirationTime is a server-determined value that specifies the
	// earliest time that the object's retention period expires.
	// This is a read-only field.
	RetentionExpirationTime time.Time
	// ACL is the list of access control rules for the object.
	ACL []ACLRule
	// If not empty, applies a predefined set of access controls. It should be set
	// only when writing, copying or composing an object. When copying or composing,
	// it acts as the destinationPredefinedAcl parameter.
	// PredefinedACL is always empty for ObjectAttrs returned from the service.
	// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
	// for valid values.
	PredefinedACL string
	// Owner is the owner of the object. This field is read-only.
	//
	// If non-zero, it is in the form of "user-<userId>".
	Owner string
	// Size is the length of the object's content. This field is read-only.
	Size int64
	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string
	// ContentDisposition is the optional Content-Disposition header of the object
	// sent in the response headers.
	ContentDisposition string
	// MD5 is the MD5 hash of the object's content. This field is read-only,
	// except when used from a Writer. If set on a Writer, the uploaded
	// data is rejected if its MD5 hash does not match this field.
	MD5 []byte
	// CRC32C is the CRC32 checksum of the object's content using the Castagnoli93
	// polynomial. This field is read-only, except when used from a Writer or
	// Composer. In those cases, if the SendCRC32C field in the Writer or Composer
	// is set to true, the uploaded data is rejected if its CRC32C hash does
	// not match this field.
	CRC32C uint32
	// MediaLink is an URL to the object's content. This field is read-only.
	MediaLink string
	// Metadata represents user-provided metadata, in key/value pairs.
	// It can be nil if no metadata is provided.
	Metadata map[string]string
	// Generation is the generation number of the object's content.
	// This field is read-only.
	Generation int64
	// Metageneration is the version of the metadata for this
	// object at this generation. This field is used for preconditions
	// and for detecting changes in metadata. A metageneration number
	// is only meaningful in the context of a particular generation
	// of a particular object. This field is read-only.
	Metageneration int64
	// StorageClass is the storage class of the object. This defines
	// how objects are stored and determines the SLA and the cost of storage.
	// Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
	// Defaults to "STANDARD".
	// See https://cloud.google.com/storage/docs/storage-classes for all
	// valid values.
	StorageClass string
	// Created is the time the object was created. This field is read-only.
	Created time.Time
	// Deleted is the time the object was deleted.
	// If not deleted, it is the zero value. This field is read-only.
	Deleted time.Time
	// Updated is the creation or modification time of the object.
	// For buckets with versioning enabled, changing an object's
	// metadata does not change this property. This field is read-only.
	Updated time.Time
	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
	// customer-supplied encryption key for the object. It is empty if there is
	// no customer-supplied encryption key.
	// See https://cloud.google.com/storage/docs/encryption for more about
	// encryption in Google Cloud Storage.
	CustomerKeySHA256 string
	// Cloud KMS key name, in the form
	// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
	// if the object is encrypted by such a key.
	//
	// Providing both a KMSKeyName and a customer-supplied encryption key (via
	// ObjectHandle.Key) will result in an error when writing an object.
	KMSKeyName string
	// Prefix is set only for ObjectAttrs which represent synthetic "directory
	// entries" when iterating over buckets using Query.Delimiter. See
	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
	// populated.
	Prefix string
	// Etag is the HTTP/1.1 Entity tag for the object.
	// This field is read-only.
	Etag string
	// A user-specified timestamp which can be applied to an object. This is
	// typically set in order to use the CustomTimeBefore and DaysSinceCustomTime
	// LifecycleConditions to manage object lifecycles.
	//
	// CustomTime cannot be removed once set on an object. It can be updated to a
	// later value but not to an earlier one. For more information see
	// https://cloud.google.com/storage/docs/metadata#custom-time .
	CustomTime time.Time
}
// convertTime converts a time in RFC3339 format to time.Time.
// If any error occurs in parsing, the zero-value time.Time is silently returned.
func convertTime(t string) time.Time {
var r time.Time
if t != "" {
r, _ = time.Parse(time.RFC3339, t)
}
return r
}
// convertProtoTime converts a protobuf Timestamp to a time.Time,
// mapping a nil timestamp to the zero-value time.Time.
func convertProtoTime(t *timestamppb.Timestamp) time.Time {
	if t == nil {
		return time.Time{}
	}
	return t.AsTime()
}
// toProtoTimestamp converts a time.Time to a protobuf Timestamp,
// mapping the zero-value time to nil.
func toProtoTimestamp(t time.Time) *timestamppb.Timestamp {
	if !t.IsZero() {
		return timestamppb.New(t)
	}
	return nil
}
// newObject converts a JSON API raw.Object into an ObjectAttrs.
// It returns nil when o is nil.
func newObject(o *raw.Object) *ObjectAttrs {
	if o == nil {
		return nil
	}
	owner := ""
	if o.Owner != nil {
		owner = o.Owner.Entity
	}
	// MD5 and CRC32C arrive base64-encoded. Decode errors are deliberately
	// ignored and leave the zero value in the corresponding field.
	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
	crc32c, _ := decodeUint32(o.Crc32c)
	var sha256 string
	if o.CustomerEncryption != nil {
		sha256 = o.CustomerEncryption.KeySha256
	}
	return &ObjectAttrs{
		Bucket:                  o.Bucket,
		Name:                    o.Name,
		ContentType:             o.ContentType,
		ContentLanguage:         o.ContentLanguage,
		CacheControl:            o.CacheControl,
		EventBasedHold:          o.EventBasedHold,
		TemporaryHold:           o.TemporaryHold,
		RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
		ACL:                     toObjectACLRules(o.Acl),
		Owner:                   owner,
		ContentEncoding:         o.ContentEncoding,
		ContentDisposition:      o.ContentDisposition,
		Size:                    int64(o.Size),
		MD5:                     md5,
		CRC32C:                  crc32c,
		MediaLink:               o.MediaLink,
		Metadata:                o.Metadata,
		Generation:              o.Generation,
		Metageneration:          o.Metageneration,
		StorageClass:            o.StorageClass,
		CustomerKeySHA256:       sha256,
		KMSKeyName:              o.KmsKeyName,
		Created:                 convertTime(o.TimeCreated),
		Deleted:                 convertTime(o.TimeDeleted),
		Updated:                 convertTime(o.Updated),
		Etag:                    o.Etag,
		CustomTime:              convertTime(o.CustomTime),
	}
}
// newObjectFromProto converts a gRPC storagepb.Object into an ObjectAttrs.
// It returns nil when o is nil.
func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
	if o == nil {
		return nil
	}
	return &ObjectAttrs{
		Bucket:                  parseBucketName(o.Bucket),
		Name:                    o.Name,
		ContentType:             o.ContentType,
		ContentLanguage:         o.ContentLanguage,
		CacheControl:            o.CacheControl,
		EventBasedHold:          o.GetEventBasedHold(),
		TemporaryHold:           o.TemporaryHold,
		RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()),
		ACL:                     fromProtoToObjectACLRules(o.GetAcl()),
		Owner:                   o.GetOwner().GetEntity(),
		ContentEncoding:         o.ContentEncoding,
		ContentDisposition:      o.ContentDisposition,
		Size:                    int64(o.Size),
		MD5:                     o.GetChecksums().GetMd5Hash(),
		CRC32C:                  o.GetChecksums().GetCrc32C(),
		Metadata:                o.Metadata,
		Generation:              o.Generation,
		Metageneration:          o.Metageneration,
		StorageClass:            o.StorageClass,
		CustomerKeySHA256:       string(o.GetCustomerEncryption().GetKeySha256Bytes()),
		KMSKeyName:              o.GetKmsKey(),
		Created:                 convertProtoTime(o.GetCreateTime()),
		Deleted:                 convertProtoTime(o.GetDeleteTime()),
		Updated:                 convertProtoTime(o.GetUpdateTime()),
		CustomTime:              convertProtoTime(o.GetCustomTime()),
	}
}
// decodeUint32 decodes a uint32 that was Base64-encoded in big-endian byte
// order, returning an error for invalid Base64 or for payloads that are not
// exactly four bytes long.
func decodeUint32(b64 string) (uint32, error) {
	d, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return 0, err
	}
	if len(d) != 4 {
		return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
	}
	var u uint32
	for _, octet := range d {
		u = u<<8 | uint32(octet)
	}
	return u, nil
}
// encodeUint32 encodes u as Base64 in big-endian byte order.
func encodeUint32(u uint32) string {
	octets := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
	return base64.StdEncoding.EncodeToString(octets)
}
// Projection is enumerated type for Query.Projection.
type Projection int

const (
	// ProjectionDefault returns all fields of objects.
	ProjectionDefault Projection = iota
	// ProjectionFull returns all fields of objects.
	ProjectionFull
	// ProjectionNoACL returns all fields of objects except for Owner and ACL.
	ProjectionNoACL
)

// String returns the wire value for the projection ("full" or "noAcl"),
// or the empty string for ProjectionDefault and unknown values.
func (p Projection) String() string {
	if p == ProjectionFull {
		return "full"
	}
	if p == ProjectionNoACL {
		return "noAcl"
	}
	return ""
}
// Query represents a query to filter objects from a bucket.
type Query struct {
	// Delimiter returns results in a directory-like fashion.
	// Results will contain only objects whose names, aside from the
	// prefix, do not contain delimiter. Objects whose names,
	// aside from the prefix, contain delimiter will have their name,
	// truncated after the delimiter, returned in prefixes.
	// Duplicate prefixes are omitted.
	// Optional.
	Delimiter string
	// Prefix is the prefix filter to query objects
	// whose names begin with this prefix.
	// Optional.
	Prefix string
	// Versions indicates whether multiple versions of the same
	// object will be included in the results.
	Versions bool
	// fieldSelection is used to select only specific fields to be returned by
	// the query. It's used internally and is populated for the user by
	// calling Query.SetAttrSelection; do not set it directly.
	fieldSelection string
	// StartOffset is used to filter results to objects whose names are
	// lexicographically equal to or after startOffset. If endOffset is also set,
	// the objects listed will have names between startOffset (inclusive) and
	// endOffset (exclusive).
	StartOffset string
	// EndOffset is used to filter results to objects whose names are
	// lexicographically before endOffset. If startOffset is also set, the objects
	// listed will have names between startOffset (inclusive) and endOffset (exclusive).
	EndOffset string
	// Projection defines the set of properties to return. It will default to ProjectionFull,
	// which returns all properties. Passing ProjectionNoACL will omit Owner and ACL,
	// which may improve performance when listing many objects.
	Projection Projection
}
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
// names in the API call. Only the ObjectAttrs field names are visible to users
// because they are already part of the public API of the package.
//
// Keys must match ObjectAttrs field names exactly; SetAttrSelection rejects
// any attribute not present here. Note that CustomerKeySHA256 maps to the
// broader customerEncryption API field.
var attrToFieldMap = map[string]string{
	"Bucket":                  "bucket",
	"Name":                    "name",
	"ContentType":             "contentType",
	"ContentLanguage":         "contentLanguage",
	"CacheControl":            "cacheControl",
	"EventBasedHold":          "eventBasedHold",
	"TemporaryHold":           "temporaryHold",
	"RetentionExpirationTime": "retentionExpirationTime",
	"ACL":                     "acl",
	"Owner":                   "owner",
	"ContentEncoding":         "contentEncoding",
	"ContentDisposition":      "contentDisposition",
	"Size":                    "size",
	"MD5":                     "md5Hash",
	"CRC32C":                  "crc32c",
	"MediaLink":               "mediaLink",
	"Metadata":                "metadata",
	"Generation":              "generation",
	"Metageneration":          "metageneration",
	"StorageClass":            "storageClass",
	"CustomerKeySHA256":       "customerEncryption",
	"KMSKeyName":              "kmsKeyName",
	"Created":                 "timeCreated",
	"Deleted":                 "timeDeleted",
	"Updated":                 "updated",
	"Etag":                    "etag",
	"CustomTime":              "customTime",
}
// SetAttrSelection makes the query populate only specific attributes of
// objects. When iterating over objects, if you only need each object's name
// and size, pass []string{"Name", "Size"} to this method. Only these fields
// will be fetched for each object across the network; the other fields of
// ObjectAttr will remain at their default values. This is a performance
// optimization; for more information, see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
//
// It returns an error if any attribute name is not a valid ObjectAttrs field.
// Duplicates are collapsed; passing an empty slice leaves the query unchanged.
func (q *Query) SetAttrSelection(attrs []string) error {
	fieldSet := make(map[string]bool)
	for _, attr := range attrs {
		field, ok := attrToFieldMap[attr]
		if !ok {
			return fmt.Errorf("storage: attr %v is not valid", attr)
		}
		fieldSet[field] = true
	}
	if len(fieldSet) > 0 {
		// Emit the fields in sorted order so that the generated selection
		// string is deterministic; map iteration order is randomized and
		// would otherwise produce a different string on every call.
		fields := make([]string, 0, len(fieldSet))
		for field := range fieldSet {
			fields = append(fields, field)
		}
		sort.Strings(fields)
		var b bytes.Buffer
		b.WriteString("prefixes,items(")
		b.WriteString(strings.Join(fields, ","))
		b.WriteString(")")
		q.fieldSelection = b.String()
	}
	return nil
}
// Conditions constrain methods to act on specific generations of
// objects.
//
// The zero value is an empty set of constraints. Not all conditions or
// combinations of conditions are applicable to all methods.
// See https://cloud.google.com/storage/docs/generations-preconditions
// for details on how these operate.
type Conditions struct {
	// Generation constraints.
	// At most one of the following three can be set to a non-zero value;
	// validate enforces this.

	// GenerationMatch specifies that the object must have the given generation
	// for the operation to occur.
	// If GenerationMatch is zero, it has no effect.
	// Use DoesNotExist to specify that the object does not exist in the bucket.
	GenerationMatch int64
	// GenerationNotMatch specifies that the object must not have the given
	// generation for the operation to occur.
	// If GenerationNotMatch is zero, it has no effect.
	GenerationNotMatch int64
	// DoesNotExist specifies that the object must not exist in the bucket for
	// the operation to occur.
	// If DoesNotExist is false, it has no effect.
	DoesNotExist bool

	// Metadata generation constraints.
	// At most one of the following two can be set to a non-zero value.

	// MetagenerationMatch specifies that the object must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64
	// MetagenerationNotMatch specifies that the object must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}
// validate reports an error if the condition set is empty or if mutually
// exclusive generation/metageneration constraints are combined; method is
// used only to label the error message.
func (c *Conditions) validate(method string) error {
	switch {
	case *c == (Conditions{}):
		return fmt.Errorf("storage: %s: empty conditions", method)
	case !c.isGenerationValid():
		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
	case !c.isMetagenerationValid():
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}
// isGenerationValid reports whether at most one of the three
// generation-related constraints is set.
func (c *Conditions) isGenerationValid() bool {
	set := 0
	for _, active := range []bool{
		c.GenerationMatch != 0,
		c.GenerationNotMatch != 0,
		c.DoesNotExist,
	} {
		if active {
			set++
		}
	}
	return set <= 1
}
// isMetagenerationValid reports whether the two metageneration constraints
// are not both set at once.
func (c *Conditions) isMetagenerationValid() bool {
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return false
	}
	return true
}
// applyConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall: the matching
// setter methods are located by name via reflection (see setConditionField),
// and an error is returned if the call type lacks a needed setter.
func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
	cval := reflect.ValueOf(call)
	// gen < 0 means "latest generation" and is simply not sent.
	if gen >= 0 {
		if !setConditionField(cval, "Generation", gen) {
			return fmt.Errorf("storage: %s: generation not supported", method)
		}
	}
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}
	// validate guarantees at most one case per switch is active.
	switch {
	case conds.GenerationMatch != 0:
		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
		}
	case conds.GenerationNotMatch != 0:
		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
		}
	case conds.DoesNotExist:
		// "Does not exist" is expressed on the wire as IfGenerationMatch(0).
		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
		}
	}
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}
// applySourceConds applies gen and conds as *source* preconditions on a
// rewrite call. Unlike applyConds it can use the concrete call type's
// IfSource* setters directly, so no reflection is needed.
func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
	// gen < 0 means "latest generation" and is simply not sent.
	if gen >= 0 {
		call.SourceGeneration(gen)
	}
	if conds == nil {
		return nil
	}
	if err := conds.validate("CopyTo source"); err != nil {
		return err
	}
	// validate guarantees at most one case per switch is active.
	switch {
	case conds.GenerationMatch != 0:
		call.IfSourceGenerationMatch(conds.GenerationMatch)
	case conds.GenerationNotMatch != 0:
		call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
	case conds.DoesNotExist:
		// "Does not exist" is expressed as IfSourceGenerationMatch(0).
		call.IfSourceGenerationMatch(0)
	}
	switch {
	case conds.MetagenerationMatch != 0:
		call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
	case conds.MetagenerationNotMatch != 0:
		call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
	}
	return nil
}
// setConditionField sets a field on a *raw.WhateverCall.
// We can't use anonymous interfaces because the return type is
// different, since the field setters are builders.
func setConditionField(call reflect.Value, name string, value interface{}) bool {
m := call.MethodByName(name)
if !m.IsValid() {
return false
}
m.Call([]reflect.Value{reflect.ValueOf(value)})
return true
}
// Retryer returns an object handle that is configured with custom retry
// behavior as specified by the options that are passed to it. All operations
// on the new handle will use the customized retry configuration.
// These retry options will merge with the bucket's retryer (if set) for the
// returned handle. Options passed into this method will take precedence over
// retry options on the bucket and client. Note that you must explicitly pass in
// each option you want to override.
func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle {
	o2 := *o
	retry := &retryConfig{}
	if o.retry != nil {
		// Copy the existing configuration before applying options. The
		// previous code aliased o.retry here, so opt.apply mutated the
		// config shared with the parent handle (and its bucket/client),
		// silently changing their retry policy as a side effect.
		*retry = *o.retry
	}
	for _, opt := range opts {
		opt.apply(retry)
	}
	o2.retry = retry
	o2.acl.retry = retry
	return &o2
}
// SetRetry configures the client with custom retry behavior as specified by
// the options that are passed to it. All operations using this client will
// use the customized retry configuration. Call it once, before issuing
// network operations — changing it mid-flight has indeterminate effects.
// Retry options set on a bucket or object handle take precedence over these.
func (c *Client) SetRetry(opts ...RetryOption) {
	cfg := c.retry
	if cfg == nil {
		cfg = &retryConfig{}
	}
	// Merge the new options into the existing (or fresh) configuration.
	for _, opt := range opts {
		opt.apply(cfg)
	}
	c.retry = cfg
}
// RetryOption allows users to configure non-default retry behavior for API
// calls made to GCS.
type RetryOption interface {
	// apply merges this option's setting into the given retry configuration.
	apply(config *retryConfig)
}
// withBackoff is the RetryOption implementation produced by WithBackoff; it
// carries the user-supplied gax.Backoff by value.
type withBackoff struct {
	backoff gax.Backoff
}

// apply installs a pointer to the stored backoff into the retry config.
func (wb *withBackoff) apply(config *retryConfig) {
	config.backoff = &wb.backoff
}

// WithBackoff allows configuration of the backoff timing used for retries.
// Available configuration options (Initial, Max and Multiplier) are described
// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields
// are not supplied by the user, gax default values will be used.
func WithBackoff(backoff gax.Backoff) RetryOption {
	return &withBackoff{backoff: backoff}
}
// RetryPolicy describes the available policies for which operations should be
// retried. The default is `RetryIdempotent` (the zero value of this type).
type RetryPolicy int

const (
	// RetryIdempotent causes only idempotent operations to be retried when the
	// service returns a transient error. Using this policy, fully idempotent
	// operations (such as `ObjectHandle.Attrs()`) will always be retried.
	// Conditionally idempotent operations (for example `ObjectHandle.Update()`)
	// will be retried only if the necessary conditions have been supplied (in
	// the case of `ObjectHandle.Update()` this would mean supplying a
	// `Conditions.MetagenerationMatch` condition is required).
	RetryIdempotent RetryPolicy = iota

	// RetryAlways causes all operations to be retried when the service returns a
	// transient error, regardless of idempotency considerations.
	RetryAlways

	// RetryNever causes the client to not perform retries on failed operations.
	RetryNever
)
// withPolicy is the RetryOption implementation produced by WithPolicy.
type withPolicy struct {
	policy RetryPolicy
}

// apply installs the selected policy into the retry config.
func (wp *withPolicy) apply(config *retryConfig) {
	config.policy = wp.policy
}

// WithPolicy allows the configuration of which operations should be performed
// with retries for transient errors.
func WithPolicy(policy RetryPolicy) RetryOption {
	return &withPolicy{policy: policy}
}
// WithErrorFunc allows users to pass a custom function to the retryer. Errors
// will be retried if and only if `shouldRetry(err)` returns true.
// By default, the following errors are retried (see invoke.go for the default
// shouldRetry function):
//
// - HTTP responses with codes 429, 502, 503, and 504.
//
// - Transient network errors such as connection reset and io.ErrUnexpectedEOF.
//
// - Errors which are considered transient using the Temporary() interface.
//
// - Wrapped versions of these errors.
//
// This option can be used to retry on a different set of errors than the
// default.
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
return &withErrorFunc{
shouldRetry: shouldRetry,
}
}
type withErrorFunc struct {
shouldRetry func(err error) bool
}
func (wef *withErrorFunc) apply(config *retryConfig) {
config.shouldRetry = wef.shouldRetry
}
// retryConfig holds the merged retry settings for a call: backoff timing,
// the retry policy, and an optional custom retry predicate.
type retryConfig struct {
	backoff     *gax.Backoff         // nil when gax defaults should be used
	policy      RetryPolicy          // zero value is RetryIdempotent
	shouldRetry func(err error) bool // nil when the default predicate applies
}
// clone returns a deep copy of the config (nil in, nil out). The backoff is
// duplicated field-by-field so the copy does not share gax state with the
// original.
func (r *retryConfig) clone() *retryConfig {
	if r == nil {
		return nil
	}
	cp := &retryConfig{
		policy:      r.policy,
		shouldRetry: r.shouldRetry,
	}
	if r.backoff != nil {
		cp.backoff = &gax.Backoff{
			Initial:    r.backoff.Initial,
			Max:        r.backoff.Max,
			Multiplier: r.backoff.Multiplier,
		}
	}
	return cp
}
// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
	src *raw.ComposeRequestSourceObjects
}

// Generation records the source object's generation on the wrapped request.
func (c composeSourceObj) Generation(gen int64) {
	c.src.Generation = gen
}

// IfGenerationMatch sets a precondition that the source object's generation
// equals gen.
func (c composeSourceObj) IfGenerationMatch(gen int64) {
	// It's safe to overwrite ObjectPreconditions, since its only field is
	// IfGenerationMatch.
	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
		IfGenerationMatch: gen,
	}
}
func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
if key == nil {
return nil
}
// TODO(jbd): Ask the API team to return a more user-friendly error
// and avoid doing this check at the client level.
if len(key) != 32 {
return errors.New("storage: not a 32-byte AES-256 key")
}
var cs string
if copySource {
cs = "copy-source-"
}
headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
keyHash := sha256.Sum256(key)
headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
return nil
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
	call := c.raw.Projects.ServiceAccount.Get(projectID)
	var acct *raw.ServiceAccount
	// run retries the lookup according to the client's retry configuration;
	// the final argument marks the operation as idempotent.
	err := run(ctx, func() error {
		var doErr error
		acct, doErr = call.Context(ctx).Do()
		return doErr
	}, c.retry, true)
	if err != nil {
		return "", err
	}
	return acct.EmailAddress, nil
}
// bucketResourceName formats the given project ID and bucket ID into a
// Bucket resource name ("projects/<p>/buckets/<b>"). This is the format the
// gRPC API requires; it follows the resource-oriented design practices in
// https://google.aip.dev/121.
func bucketResourceName(p, b string) string {
	return "projects/" + p + "/buckets/" + b
}
// parseBucketName strips the leading "projects/_/buckets/" resource-path
// segment, if present, and returns the bare bucket ID as used by the v1 API.
// Inputs without that prefix are returned unchanged.
func parseBucketName(b string) string {
	const prefix = "projects/_/buckets/"
	if strings.HasPrefix(b, prefix) {
		return b[len(prefix):]
	}
	return b
}
// setConditionProtoField uses protobuf reflection to set the named condition
// field on m to v, reporting whether the message declares such a field.
//
// This is an experimental API and not intended for public use.
func setConditionProtoField(m protoreflect.Message, f string, v int64) bool {
	field := m.Descriptor().Fields().ByName(protoreflect.Name(f))
	if field == nil {
		return false
	}
	m.Set(field, protoreflect.ValueOfInt64(v))
	return true
}
// applyCondsProto validates and attempts to set the conditions on a protobuf
// message using protobuf reflection. A negative gen means "no specific
// generation requested"; method names the calling API method for error text.
//
// This is an experimental API and not intended for public use.
func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
	rmsg := msg.ProtoReflect()

	if gen >= 0 {
		if !setConditionProtoField(rmsg, "generation", gen) {
			return fmt.Errorf("storage: %s: generation not supported", method)
		}
	}
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}

	// The generation preconditions are mutually exclusive; only the first
	// matching case is applied (validate rejected invalid combinations).
	switch {
	case conds.GenerationMatch != 0:
		if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) {
			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
		}
	case conds.GenerationNotMatch != 0:
		if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
		}
	case conds.DoesNotExist:
		// "Does not exist" is expressed as generation == 0.
		if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) {
			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
		}
	}
	// Likewise for the metageneration preconditions.
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}
| [
"\"STORAGE_EMULATOR_HOST\""
]
| []
| [
"STORAGE_EMULATOR_HOST"
]
| [] | ["STORAGE_EMULATOR_HOST"] | go | 1 | 0 | |
kubernetes-model/vendor/github.com/coreos/etcd/etcdctl/ctlv2/command/util.go | /**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strings"
"syscall"
"time"
"github.com/bgentry/speakeasy"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
"github.com/urfave/cli"
"golang.org/x/net/context"
)
var (
ErrNoAvailSrc = errors.New("no available argument and stdin")
// the maximum amount of time a dial will wait for a connection to setup.
// 30s is long enough for most of the network conditions.
defaultDialTimeout = 30 * time.Second
)
func argOrStdin(args []string, stdin io.Reader, i int) (string, error) {
if i < len(args) {
return args[i], nil
}
bytes, err := ioutil.ReadAll(stdin)
if string(bytes) == "" || err != nil {
return "", ErrNoAvailSrc
}
return string(bytes), nil
}
// getPeersFlagValue resolves the comma-separated endpoint list, trying in
// order: --endpoints, $ETCDCTL_ENDPOINTS, --endpoint, $ETCDCTL_ENDPOINT,
// --peers, $ETCDCTL_PEERS, and finally the historical localhost defaults.
func getPeersFlagValue(c *cli.Context) []string {
	// Sources are wrapped in closures so they are only evaluated until the
	// first non-empty value is found.
	sources := []func() string{
		func() string { return c.GlobalString("endpoints") },
		func() string { return os.Getenv("ETCDCTL_ENDPOINTS") },
		func() string { return c.GlobalString("endpoint") },
		func() string { return os.Getenv("ETCDCTL_ENDPOINT") },
		func() string { return c.GlobalString("peers") },
		func() string { return os.Getenv("ETCDCTL_PEERS") },
	}

	peerstr := ""
	for _, source := range sources {
		if peerstr = source(); peerstr != "" {
			break
		}
	}

	// If we still don't have peers, use a default
	if peerstr == "" {
		peerstr = "http://127.0.0.1:2379,http://127.0.0.1:4001"
	}

	return strings.Split(peerstr, ",")
}
// getDomainDiscoveryFlagValue resolves endpoints via DNS SRV discovery for
// the configured discovery domain. It returns an empty slice when no domain
// is configured. Unless insecure discovery is enabled, plain-HTTP ("http://")
// endpoints are dropped from the result with a warning on stderr.
func getDomainDiscoveryFlagValue(c *cli.Context) ([]string, error) {
	domainstr, insecure := getDiscoveryDomain(c)

	// If we still don't have domain discovery, return nothing
	if domainstr == "" {
		return []string{}, nil
	}

	discoverer := client.NewSRVDiscover()
	eps, err := discoverer.Discover(domainstr)
	if err != nil {
		return nil, err
	}
	if insecure {
		return eps, err
	}
	// Strip insecure connections.
	// BUG FIX: the original call was strings.HasPrefix("http://", ep) —
	// arguments reversed — so the check was effectively always false and
	// insecure endpoints were never filtered. The string being tested must
	// come first and the prefix second.
	ret := []string{}
	for _, ep := range eps {
		if strings.HasPrefix(ep, "http://") {
			fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)
			continue
		}
		ret = append(ret, ep)
	}
	return ret, err
}
// getDiscoveryDomain returns the DNS SRV discovery domain (from the
// --discovery-srv flag, falling back to $ETCDCTL_DISCOVERY_SRV) and whether
// insecure discovery is enabled (--insecure-discovery flag or a non-empty
// $ETCDCTL_INSECURE_DISCOVERY).
func getDiscoveryDomain(c *cli.Context) (string, bool) {
	domain := c.GlobalString("discovery-srv")
	if domain == "" {
		// Fall back to the environment when the flag was not supplied.
		domain = os.Getenv("ETCDCTL_DISCOVERY_SRV")
	}
	insecure := c.GlobalBool("insecure-discovery") || os.Getenv("ETCDCTL_INSECURE_DISCOVERY") != ""
	return domain, insecure
}
// getEndpoints returns the client endpoints to use: DNS SRV discovery first,
// then the peer flags/environment. Every endpoint is normalized to carry a
// scheme (defaulting to "http").
func getEndpoints(c *cli.Context) ([]string, error) {
	eps, err := getDomainDiscoveryFlagValue(c)
	if err != nil {
		return nil, err
	}

	// If domain discovery returns no endpoints, check peer flag
	if len(eps) == 0 {
		eps = getPeersFlagValue(c)
	}

	normalized := make([]string, len(eps))
	for i, ep := range eps {
		u, parseErr := url.Parse(ep)
		if parseErr != nil {
			return nil, parseErr
		}
		if u.Scheme == "" {
			u.Scheme = "http"
		}
		normalized[i] = u.String()
	}
	return normalized, nil
}
// getTransport builds an HTTP transport from the TLS flags (--ca-file,
// --cert-file, --key-file, each falling back to its ETCDCTL_* environment
// variable). The discovery domain is used as the TLS ServerName unless
// insecure discovery is enabled. The dial timeout is capped by
// --total-timeout when that is shorter.
func getTransport(c *cli.Context) (*http.Transport, error) {
	// Prefer the command-line flag; fall back to the environment variable.
	flagOrEnv := func(flagVal, envKey string) string {
		if flagVal != "" {
			return flagVal
		}
		return os.Getenv(envKey)
	}

	cafile := flagOrEnv(c.GlobalString("ca-file"), "ETCDCTL_CA_FILE")
	certfile := flagOrEnv(c.GlobalString("cert-file"), "ETCDCTL_CERT_FILE")
	keyfile := flagOrEnv(c.GlobalString("key-file"), "ETCDCTL_KEY_FILE")

	discoveryDomain, insecure := getDiscoveryDomain(c)
	if insecure {
		discoveryDomain = ""
	}

	tls := transport.TLSInfo{
		CAFile:     cafile,
		CertFile:   certfile,
		KeyFile:    keyfile,
		ServerName: discoveryDomain,
	}

	dialTimeout := defaultDialTimeout
	if total := c.GlobalDuration("total-timeout"); total != 0 && total < dialTimeout {
		dialTimeout = total
	}
	return transport.NewTransport(tls, dialTimeout)
}
// getUsernamePasswordFromFlag parses a --username flag value with the
// default "Password: " prompt.
func getUsernamePasswordFromFlag(usernameFlag string) (username string, password string, err error) {
	return getUsernamePassword("Password: ", usernameFlag)
}

// getUsernamePassword splits a "user:password" flag value. When no colon is
// present the whole value is the username and the password is read
// interactively using the given prompt.
func getUsernamePassword(prompt, usernameFlag string) (username string, password string, err error) {
	sep := strings.IndexByte(usernameFlag, ':')
	if sep >= 0 {
		return usernameFlag[:sep], usernameFlag[sep+1:], nil
	}
	// No inline password; prompt for it without echoing.
	password, err = speakeasy.Ask(prompt)
	if err != nil {
		return "", "", err
	}
	return usernameFlag, password, nil
}
// mustNewKeyAPI returns a KeysAPI backed by a freshly constructed (and,
// unless --no-sync, synced) client; exits the process on failure.
func mustNewKeyAPI(c *cli.Context) client.KeysAPI {
	return client.NewKeysAPI(mustNewClient(c))
}

// mustNewMembersAPI returns a MembersAPI backed by a freshly constructed
// (and, unless --no-sync, synced) client; exits the process on failure.
func mustNewMembersAPI(c *cli.Context) client.MembersAPI {
	return client.NewMembersAPI(mustNewClient(c))
}
// mustNewClient builds an etcd client from the CLI flags and, unless
// --no-sync is given, syncs the member list from the cluster. Construction
// failure prints the error and exits; sync failures are routed through
// handleError (which also exits) for no-endpoint and connection errors.
func mustNewClient(c *cli.Context) client.Client {
	hc, err := newClient(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	debug := c.GlobalBool("debug")
	if debug {
		client.EnablecURLDebug()
	}

	if !c.GlobalBool("no-sync") {
		if debug {
			fmt.Fprintf(os.Stderr, "start to sync cluster using endpoints(%s)\n", strings.Join(hc.Endpoints(), ","))
		}
		ctx, cancel := contextWithTotalTimeout(c)
		err := hc.Sync(ctx)
		cancel()
		if err != nil {
			if err == client.ErrNoEndpoints {
				fmt.Fprintf(os.Stderr, "etcd cluster has no published client endpoints.\n")
				fmt.Fprintf(os.Stderr, "Try '--no-sync' if you want to access non-published client endpoints(%s).\n", strings.Join(hc.Endpoints(), ","))
				handleError(c, ExitServerError, err)
			}
			if isConnectionError(err) {
				handleError(c, ExitBadConnection, err)
			}
		}
		if debug {
			fmt.Fprintf(os.Stderr, "got endpoints(%s) after sync\n", strings.Join(hc.Endpoints(), ","))
		}
	}

	if debug {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
	}

	return hc
}
// isConnectionError reports whether err (possibly nested) represents a
// connectivity failure — a dial/read failure, a network timeout, or a
// refused connection — rather than an application-level error.
func isConnectionError(err error) bool {
	switch t := err.(type) {
	case *client.ClusterError:
		// A cluster error counts as a connection error only if every
		// per-endpoint error does.
		for _, cerr := range t.Errors {
			if !isConnectionError(cerr) {
				return false
			}
		}
		return true
	case *net.OpError:
		// NOTE: *net.OpError also implements net.Error, so this more
		// specific case must stay before the net.Error case below.
		if t.Op == "dial" || t.Op == "read" {
			return true
		}
		// Otherwise classify by the wrapped error.
		return isConnectionError(t.Err)
	case net.Error:
		if t.Timeout() {
			return true
		}
	case syscall.Errno:
		if t == syscall.ECONNREFUSED {
			return true
		}
	}
	return false
}
// mustNewClientNoSync builds an etcd client from the CLI flags without
// syncing the cluster member list; exits the process if construction fails.
func mustNewClientNoSync(c *cli.Context) client.Client {
	hc, err := newClient(c)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	if c.GlobalBool("debug") {
		fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
		client.EnablecURLDebug()
	}

	return hc
}
// newClient constructs an etcd v2 client from the global CLI flags:
// endpoints, TLS transport, per-request header timeout and, optionally,
// basic-auth credentials. The username may come from --username or
// $ETCDCTL_USERNAME; a value without an inline ":password" triggers an
// interactive prompt.
func newClient(c *cli.Context) (client.Client, error) {
	eps, err := getEndpoints(c)
	if err != nil {
		return nil, err
	}

	tr, err := getTransport(c)
	if err != nil {
		return nil, err
	}

	cfg := client.Config{
		Transport:               tr,
		Endpoints:               eps,
		HeaderTimeoutPerRequest: c.GlobalDuration("timeout"),
	}

	uFlag := c.GlobalString("username")
	if uFlag == "" {
		uFlag = os.Getenv("ETCDCTL_USERNAME")
	}
	if uFlag != "" {
		username, password, err := getUsernamePasswordFromFlag(uFlag)
		if err != nil {
			return nil, err
		}
		cfg.Username = username
		cfg.Password = password
	}

	return client.New(cfg)
}
// contextWithTotalTimeout returns a context bounded by the --total-timeout
// global flag, plus its cancel function (which the caller must invoke).
func contextWithTotalTimeout(c *cli.Context) (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), c.GlobalDuration("total-timeout"))
}
| [
"\"ETCDCTL_ENDPOINTS\"",
"\"ETCDCTL_ENDPOINT\"",
"\"ETCDCTL_PEERS\"",
"\"ETCDCTL_DISCOVERY_SRV\"",
"\"ETCDCTL_INSECURE_DISCOVERY\"",
"\"ETCDCTL_CA_FILE\"",
"\"ETCDCTL_CERT_FILE\"",
"\"ETCDCTL_KEY_FILE\"",
"\"ETCDCTL_USERNAME\""
]
| []
| [
"ETCDCTL_PEERS",
"ETCDCTL_ENDPOINT",
"ETCDCTL_ENDPOINTS",
"ETCDCTL_INSECURE_DISCOVERY",
"ETCDCTL_KEY_FILE",
"ETCDCTL_USERNAME",
"ETCDCTL_DISCOVERY_SRV",
"ETCDCTL_CERT_FILE",
"ETCDCTL_CA_FILE"
]
| [] | ["ETCDCTL_PEERS", "ETCDCTL_ENDPOINT", "ETCDCTL_ENDPOINTS", "ETCDCTL_INSECURE_DISCOVERY", "ETCDCTL_KEY_FILE", "ETCDCTL_USERNAME", "ETCDCTL_DISCOVERY_SRV", "ETCDCTL_CERT_FILE", "ETCDCTL_CA_FILE"] | go | 9 | 0 | |
du/denv/SshConfigManager.py | import logging
import re
import os
import sys
from subprocess import CalledProcessError
from du.denv.Command import Command
# Module-level logger named after the last component of this module's dotted path.
logger = logging.getLogger(__name__.split(".")[-1])
class SshConfigManager:
    """
    Manages ssh configuration files on host and client side for given user.

    The class-level constants below are evaluated once, at class-definition
    time, based on ``sys.platform``.
    """

    # Path to the scp executable for both Linux and Win32 host platform
    SCP_BINARY = (
        "scp"
        if sys.platform == Command.PLATFORM_OS_LINUX
        else os.path.join(Command.WIN32_SYSTEM_PATH, "OpenSSH\\scp.exe")
    )

    # Path to the ssh-keygen executable for both Linux and Win32 host platform
    # (invoked with an empty passphrase and the key file substituted for {})
    SSH_KEYGEN_BINARY = (
        "ssh-keygen -N '' -f {}"
        if sys.platform == Command.PLATFORM_OS_LINUX
        else os.path.join(
            Command.WIN32_SYSTEM_PATH, 'OpenSSH\\ssh-keygen.exe -P "" -f {}'
        )
    )

    # Path to ssh program data folder on Windows platform (empty on Linux)
    WIN32_SSH_PROGRAM_DATA_PATH = (
        os.path.join(
            os.environ["AllUsersProfile"],
            "ssh",
        )
        if sys.platform == Command.PLATFORM_OS_WIN32
        else ""
    )

    # System ssh config file on Win32 OS
    WIN32_SYSTEM_SSH_CONFIG_FILE = os.path.join(
        WIN32_SSH_PROGRAM_DATA_PATH + os.sep, "ssh_config"
    )

    # Local user SSH root folder (~/.ssh)
    LOCAL_USER_SSH_ROOT_FOLDER = os.path.join(os.path.expanduser("~") + os.sep, ".ssh")

    # Local user SSH config file location on Linux OS
    LOCAL_LINUX_USER_SSH_CONFIG_FILE = os.path.join(
        LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "config"
    )

    # Local user SSH config file location on Win32 OS
    LOCAL_WIN32_USER_SSH_CONFIG_FILE = os.path.join(
        LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "config_overlay"
    )

    # Local user public and private key files
    LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC = os.path.join(
        LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "id_rsa.pub"
    )
    LOCAL_USER_SSH_IDENTITY_FILE_PRIVATE = os.path.join(
        LOCAL_USER_SSH_ROOT_FOLDER + os.sep, "id_rsa"
    )

    # Remote side authorized_keys file location (Linux OS only for now);
    # {} is the remote username
    REMOTE_LINUX_AUTH_KEYS_FILE = "/home/{}/.ssh/authorized_keys"

    # Remote user SSH config file location (Linux OS only for now)
    REMOTE_LINUX_USER_SSH_CONFIG_FILE = "/home/{}/.ssh/config"

    # Docker container SSH config file location ({} = username, container name)
    DOCKER_CONTAINER_SSH_CONFIG_FILE = "/home/{}/.ssh/{}"

    # OpenSSH default binary location on Windows 10, escaped for use as a
    # sed replacement string.
    # NOTE(review): re.escape is applied twice (inner and outer) — confirm the
    # double escaping is intentional for the sed expression it feeds.
    OPENSSH_WIN10_FILE_LOCATION = re.escape(
        r"C:\\Windows\\System32\\{}".format(
            re.escape(Command.WIN32_SSH_RELATIVE_EXE_PATH)
        )
    )

    # Command string that is passed to remote shell in order to get the list of active_containers
    SSH_DOCKER_CONTAINER_LIST_CMDLINE = 'active_containers=$(docker ps --format "{{ .Names }}" --filter "name=%(filter)s")'

    # Line which is prepended to the user's ssh config file on Windows platform
    SSH_CONFIG_OVERLAY_INCLUDE_STRING = "Include config_overlay"

    # Default container template name
    CONTAINER_NAME_TEMPLATE = "{}-denv"

    # Windows host system ssh_config file template: disable host key checking
    # for all containers created from CONTAINER_NAME_TEMPLATE
    WIN32_SSH_CONFIG_FILE_TEMPLATE = """Host *{}*
    StrictHostKeyChecking no
    UserKnownHostsFile NUL
""".format(
        CONTAINER_NAME_TEMPLATE.format("")
    )
    def __init__(self, command):
        """
        Constructor

        @param command Command instance used to run local/remote shell commands
        """
        self.__command = command

        # Patch the system-wide ssh_config on a Windows host if needed
        # (no-op on Linux); may exit the process when admin rights are missing.
        self.__sshWinConfigPatcher()
    def sanitizeUserSshConfig(self, filter):
        """
        Synchronize user's ssh config file with active docker container hosts
        and generate global ssh configs for both Linux and Windows platforms.

        @param filter Filter string used to filter current user's docker
               containers (note: shadows the ``filter`` builtin inside this method)
        @return stdout of bash executed script (None when host is local Win32)
        """
        # Set ssh config files for given user
        configFileLinux = self.REMOTE_LINUX_USER_SSH_CONFIG_FILE.format(
            self.__command.getUsername()
        )
        configFileWin = configFileLinux + "{}".format("_windows")

        logger.debug(
            "Sanitizing container ssh configs:\n\thost:\t{}\n\tuser:\t{}".format(
                self.__command.getHost(), self.__command.getUsername()
            )
        )

        cmdRet = None

        # Prepare bash shell command which will update user ssh config
        # files based on currently active docker containers: per-container
        # config fragments whose container no longer exists are deleted,
        # the rest are concatenated into ~/.ssh/config, and a Windows
        # variant is derived via sed (jump-box host name, NUL device,
        # Windows ssh binary path).
        cmd = """
        rm -rf %(configLinux)s
        rm -rf %(configWin)s
        %(dockerListContCommand)s
        delete=1
        for file in /home/%(username)s/.ssh/%(filter)s*
        do
            for container in $active_containers; do
                if [ \"${file##*/\"$container\"}\" ]; then
                    delete=1
                else
                    delete=0
                    break
                fi
            done
            if [ "$delete" = "1" ]; then
                rm -rf $file
            else
                cat $file >> /home/%(username)s/.ssh/config
            fi
        done
        if [ -f "%(configLinux)s" ]; then
            sed -e 's/localhost/jump-box/g' -e 's#/dev/null#NUL#g' \
            -e %(winSshBinPath)s %(configLinux)s > %(configWin)s
        fi
        """ % {
            "configLinux": configFileLinux,
            "configWin": configFileWin,
            "dockerListContCommand": self.__command.sshCommandStringConvert(
                self.SSH_DOCKER_CONTAINER_LIST_CMDLINE % {"filter": filter}
            ),
            "filter": filter,
            "username": self.__command.getUsername(),
            "winSshBinPath": "s/ssh/" + self.OPENSSH_WIN10_FILE_LOCATION + "/g",
        }

        # If host is local and Win32, skip this step
        if not (
            self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32
            and self.__command.getHost() is None
        ):
            cmdRet = self.__command.runCommand(cmd)

            if not cmdRet:
                # Copy final ssh config file back to host
                self.__copyDockerSshConfigToHost()

        return cmdRet
    def createDockerContainerSshConfig(self, containerAddress, containerName):
        """
        Creates a ssh config for given docker container name and IP address.
        This config is created with host acting as a jump-box for the spawned
        docker container's ssh connection.
        Generated config also disables Host Key Checking for those ssh connections.

        @param containerAddress IP address of the docker container
        @param containerName Name of the docker container
        @return stdout of bash executed script
        """
        # Set ssh docker container config file
        dockerSshConfig = self.DOCKER_CONTAINER_SSH_CONFIG_FILE.format(
            self.__command.getUsername(), containerName
        )

        logger.debug(
            "Creating ssh config for:\n\tuser:\t{}\n\tcontainer:\t{}".format(
                self.__command.getUsername(), containerName
            )
        )

        # Prepare bash shell command which will create ssh config for given
        # user and docker container: a "localhost" jump-box entry followed by
        # the container entry proxied through it.
        cmd = """
        if [ ! -d "/home/%(username)s/.ssh" ]; then
            mkdir "/home/%(username)s/.ssh"
        fi
        echo "Host localhost" | tee -a %(dockerConfig)s > /dev/null
        echo "  HostName %(host)s" | tee -a %(dockerConfig)s > /dev/null
        echo "  User %(username)s" | tee -a %(dockerConfig)s > /dev/null
        echo "  Port 22" | tee -a %(dockerConfig)s > /dev/null
        echo | tee -a %(dockerConfig)s > /dev/null
        echo "Host %(dockerName)s" | tee -a %(dockerConfig)s > /dev/null
        echo "  HostName %(dockerIp)s" | tee -a %(dockerConfig)s > /dev/null
        echo "  User %(username)s" | tee -a %(dockerConfig)s > /dev/null
        echo "  StrictHostKeyChecking no" | tee -a %(dockerConfig)s > /dev/null
        echo "  UserKnownHostsFile /dev/null" | tee -a %(dockerConfig)s > /dev/null
        echo "  ProxyCommand ssh -q -W %%h:%%p localhost" | tee -a %(dockerConfig)s > /dev/null
        echo | tee -a %(dockerConfig)s > /dev/null
        """ % {
            "username": self.__command.getUsername(),
            "dockerConfig": dockerSshConfig,
            "dockerIp": containerAddress,
            "dockerName": containerName,
            "host": self.__command.getHost()
            if self.__command.getHost()
            else "localhost",
        }

        return self.__command.runCommand(cmd)
def copyLocalSshPubKeyToRemote(self):
"""
Copies local user's identity file (eg. ~/.ssh/id_rsa.pub) to the
remote host authorized_keys file
If local identity file is not presnet new one will be generated
@return stdout of the executed remote command
"""
# 1. Check if local user has a generated local identity
# Usually it is id_rsa.pub file located in ~/.ssh folder
# If not present try to generate one
if not os.path.exists(self.LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC):
logger.info(
"There is no local user's identity on this machine, we will create one"
)
# If we are on Windows host check if .ssh folder exists in local user's
# home directory and create it since the ssh keygeneration will fail otherwise
if (
self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32
and not os.path.exists(self.LOCAL_USER_SSH_ROOT_FOLDER)
):
logger.info(
"There is no .ssh folder in user's home direcotry on Windows host, we will creat one."
)
os.makedirs(self.LOCAL_USER_SSH_ROOT_FOLDER)
crateLocalUserPublicKeyCommand = self.SSH_KEYGEN_BINARY.format(
self.LOCAL_USER_SSH_IDENTITY_FILE_PRIVATE
)
self.__command.runCommand(crateLocalUserPublicKeyCommand, True)
# Also on Windows platform we need to create config file if it does not exist
# This file which will include config_overlay file which consists of container
# ssh jump-host configs
if self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32:
if not os.path.exists(self.LOCAL_LINUX_USER_SSH_CONFIG_FILE):
logger.info(
"There is no ssh config file, we will create one and patch it"
)
self.__fileLinePrepender(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
True,
)
# If it exists we need to check if the config_overlay is already included
# If not, add that line at the begining of the file
else:
if not self.__fileLineSearch(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
):
logger.info("ssh config file found but it will be patched")
self.__fileLinePrepender(
self.LOCAL_LINUX_USER_SSH_CONFIG_FILE,
self.SSH_CONFIG_OVERLAY_INCLUDE_STRING,
)
# Get the public key from the id_rsa.pub file
with open(self.LOCAL_USER_SSH_IDENTITY_FILE_PUBLIC, "r") as file:
publicKey = file.read().replace("\n", "")
logger.debug("User's public key: " + publicKey)
# 2. Check if authorized_keys file exists on remote side
# and create it if missing, check if user's public key
# is already there and append it if necessery
logger.debug("Transfering local user's public key to remote side if needed")
# Prepare bash shell command which will do the job
cmd = """
if [ ! -d "/home/%(username)s/.ssh" ]; then
mkdir "/home/%(username)s/.ssh"
fi
if [ -f "%(remoteAuthKeysFile)s" ]; then
echo "File authorized_keys exists, checking if user public key is already there"
if grep -Fxq "%(localUserPublicKey)s" "%(remoteAuthKeysFile)s"; then
echo "User public key found, do nothing"
else
echo "User public key not found, append it"
echo "%(localUserPublicKey)s" | tee -a "%(remoteAuthKeysFile)s" > /dev/null
fi
else
echo "File authorized_keys does not exist, create one and append user public key"
echo "%(localUserPublicKey)s" | tee -a "%(remoteAuthKeysFile)s" > /dev/null
fi
chmod 600 "%(remoteAuthKeysFile)s"
""" % {
"username": self.__command.getUsername(),
"remoteAuthKeysFile": self.REMOTE_LINUX_AUTH_KEYS_FILE.format(
self.__command.getUsername()
),
"localUserPublicKey": publicKey
if self.__command.getHostPlatform() is Command.PLATFORM_OS_LINUX
or Command.PLATFORM_OS_MACOS
else re.escape(publicKey),
}
return self.__command.runCommand(cmd)
    def __copyDockerSshConfigToHost(self):
        """
        Copies remote ssh config files to the local host.
        After this step the local host has the ssh config with jump-host
        configuration to the remote docker containers.

        @return stdout of the executed commands (None when every step failed)
        """
        # Set ssh config files for given user
        remoteConfigFileLinux = self.REMOTE_LINUX_USER_SSH_CONFIG_FILE.format(
            self.__command.getUsername()
        )
        localConfigFileLinux = self.LOCAL_LINUX_USER_SSH_CONFIG_FILE
        remoteConfigFileWin = remoteConfigFileLinux + "{}".format("_windows")

        # Determine local host and prepare copy commands accordingly
        if (
            self.__command.getHostPlatform() == Command.PLATFORM_OS_LINUX
            or self.__command.getHostPlatform() == Command.PLATFORM_OS_MACOS
        ):
            logger.debug("Prepare SSH config sync from remote to Linux host")
            # NOTE: % binds tighter than +, so the dict formats only the
            # literal on its left; SCP_BINARY is concatenated afterwards.
            # The "userLocalWindowsSshConfig" key is unused by this template.
            scpSshConfigCopyCommand = self.SCP_BINARY + " %(username)s@%(remoteHost)s:%(remoteConfigLinux)s %(localConfigLinux)s > \/dev\/null 2>&1" % {
                "username": self.__command.getUsername(),
                "userLocalWindowsSshConfig": self.LOCAL_WIN32_USER_SSH_CONFIG_FILE,
                "remoteHost": self.__command.getHost(),
                "remoteConfigLinux": remoteConfigFileLinux,
                "localConfigLinux": localConfigFileLinux,
            }
            localSshConfigPath = localConfigFileLinux
        elif self.__command.getHostPlatform() == Command.PLATFORM_OS_WIN32:
            logger.debug("Prepare SSH config sync from remote to Win32 host")
            scpSshConfigCopyCommand = self.SCP_BINARY + " %(remotePort)s %(username)s@%(remoteHost)s:%(configWin)s %(userLocalWindowsSshConfig)s" % {
                "username": self.__command.getUsername(),
                "userLocalWindowsSshConfig": self.LOCAL_WIN32_USER_SSH_CONFIG_FILE,
                "remoteHost": self.__command.getHost(),
                "remotePort": "-p {}".format(self.__command.getPort())
                if self.__command.getPort()
                else "",
                "configWin": remoteConfigFileWin,
            }
            localSshConfigPath = self.LOCAL_WIN32_USER_SSH_CONFIG_FILE

        # Copy the remote ssh config files to local host.
        # The actions run in order until one succeeds: first try the scp
        # copy; if it raises CalledProcessError, fall through and remove the
        # (now stale) local config instead.
        scpSshCopyCmdParams = {"command": scpSshConfigCopyCommand, "local": True}
        localSshConfigPathParams = {"path": localSshConfigPath}

        command_list = [
            (self.__command.runCommand, scpSshCopyCmdParams, CalledProcessError),
            (os.remove, localSshConfigPathParams, FileNotFoundError),
        ]

        result = None
        for action, params, ex in command_list:
            try:
                result = action(**params)
                break
            except CalledProcessError as ex:
                logger.debug(
                    "Remote SSH config file missing or some other error - do local cleanup. Return code is {}".format(
                        ex.returncode
                    )
                )
                continue
            except FileNotFoundError as ex:
                logger.debug(
                    "Local SSH config file missing or some other error. Strerror: {}, error number: {}".format(
                        ex.strerror, ex.errno
                    )
                )
        return result
    def __sshWinConfigPatcher(self):
        """
        Patches the ssh_config file on Win32 platform to disable
        StrictHostKeyChecking option for containers started by this tool.
        Call to this function needs to be done from within administrative
        context; when admin rights are missing the process exits after
        instructing the user to restart with elevation.
        This patching is not needed on Linux platform.
        """
        # Check if system ssh_config file exists
        if self.__command.getHostPlatform() is Command.PLATFORM_OS_WIN32:
            if not os.path.exists(self.WIN32_SYSTEM_SSH_CONFIG_FILE):
                logger.info("There is no system ssh_config on this Windows host")
                # Check for admin rights
                if not self.__command.checkAdmin()[1]:
                    # Inform user that in order to patch the system ssh_config file
                    # the tool needs to be restarted from shell with admin privileges
                    logger.info(
                        "Please restart this tool from shell with admin privileges, so we can create and patch it"
                    )
                    sys.exit()
                else:
                    # Create the file and apply the patch to the begining of the file
                    self.__fileLinePrepender(
                        self.WIN32_SYSTEM_SSH_CONFIG_FILE,
                        self.WIN32_SSH_CONFIG_FILE_TEMPLATE,
                        True,
                    )
                    logger.info(
                        "We have admin rights... file is crated and patched successfully"
                    )
            else:
                # File exists: patch only when the template's first line is
                # not already present (checking one line is good enough).
                if not self.__fileLineSearch(
                    self.WIN32_SYSTEM_SSH_CONFIG_FILE,
                    # Do search on the first line only, it is good enough
                    self.WIN32_SSH_CONFIG_FILE_TEMPLATE.partition("\n")[0],
                ):
                    logger.info(
                        "System ssh_config file found but it needs to be patched"
                    )
                    # Check for admin rights
                    if not self.__command.checkAdmin()[1]:
                        # Inform user that in order to patch the system ssh_config file
                        # the tool needs to be restarted from shell with admin privileges
                        logger.info(
                            "Please restart this tool from shell with admin privileges, so we can patch it"
                        )
                        sys.exit()
                    else:
                        # Append the patch to the begining of the file
                        self.__fileLinePrepender(
                            self.WIN32_SYSTEM_SSH_CONFIG_FILE,
                            self.WIN32_SSH_CONFIG_FILE_TEMPLATE,
                        )
                        logger.info(
                            "We have admin rights... patching is finished successfully"
                        )

        return
def __fileLinePrepender(self, filename, line, newFile=False):
"""
Adds string line to the begining of the file
@param filename File which will be modified (line prepended)
@param line String line
@param newFile If True it will create/overwrite the file. If False it will patch existing file
"""
with open(filename, "w+" if newFile else "r+") as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip("\r\n") + "\n\n" + content)
def __fileLineSearch(self, filename, searchLine):
"""
Searches a string line in the file
@param filename File which will be used for serach
@param searchLine String line that is beeing searched
@return True if line is found, False otherwise
"""
with open(filename, "r") as f:
for line in f:
line = line.rstrip() # remove '\n' at end of line
if searchLine == line:
return True
return False
| []
| []
| [
"AllUsersProfile"
]
| [] | ["AllUsersProfile"] | python | 1 | 0 | |
python_experiments/run_experiments/acc_tri_cnt/run_our_executables_cuda_varying_block_size.py | import socket
from exec_utilities import time_out_util
from config import *
from exec_utilities.exec_utils import *
def run_exp(env_tag=knl_tag):
    """
    Runs the CUDA triangle-counting executables over a list of datasets while
    varying the kernel block size, writing per-run statistics files and an
    md5-based correctness check under ./exp_results/.

    @param env_tag key into config.json selecting the host environment section
    """
    with open('config.json') as ifs:
        my_config_dict = json.load(ifs)[env_tag]
    ######################
    # our_exec_path = my_config_dict["our_exec_cuda_path"]
    # NOTE(review): executable path is hard-coded instead of read from config.
    our_exec_path = '/homes/ywangby/workspace/yche/new-git-repos-yche/scan-query-root/scan-query-cuda/build'
    data_set_path = my_config_dict[data_set_path_tag]
    ######################
    our_exec_name_lst = [
        'scan-xp-cuda-experimental-bitmap-varying-block-size',
        'scan-xp-cuda-experimental-hybrid-kernels-varying-block-size',
    ]
    data_set_lst = [
        'snap_livejournal',
        'snap_orkut',
        'webgraph_webbase',
        'webgraph_it',
        'webgraph_twitter',
        'snap_friendster',
    ]
    exp_res_root_name = 'exp_results'
    folder_name = 'overview-10-19-varying-block-size-for-overall-cmp'
    # parameters
    eps_lst = [0.2]
    mu_lst = [5]
    thread_num = my_config_dict[thread_num_tag]
    thread_num_lst = [thread_num]
    # block_size_lst = range(1, 32)
    def one_round(is_rev_deg=False, num_of_gpus=4):
        # Runs the full (dataset x executable x eps x mu x threads x block size)
        # grid once; results go to a folder suffixed with the GPU count.
        for data_set_name in data_set_lst:
            if is_rev_deg:
                # switch to the reverse-degree-ordered variant of the dataset
                data_set_name = data_set_name + os.sep + rev_deg_order_tag
            for our_algorithm in our_exec_name_lst:
                # if 'bitmap' in our_algorithm and 'friendster' in data_set_name:
                #     block_size_lst = range(3, 32)
                # else:
                #     block_size_lst = range(1, 32)
                block_size_lst = [4, 8, 16, 32]
                for eps in eps_lst:
                    for mu in mu_lst:
                        for t_num in thread_num_lst:
                            for num_pass in block_size_lst:
                                # per-run output dir: ./exp_results/<folder>-<gpus>/<block>/<dataset>/<eps>/<mu>/<threads>
                                statistics_dir = os.sep.join(
                                    map(str,
                                        ['.', exp_res_root_name,
                                         folder_name + '-' + str(num_of_gpus),
                                         num_pass,
                                         data_set_name, eps, mu, t_num]))
                                os.system('mkdir -p ' + statistics_dir)
                                statistics_file_path = statistics_dir + os.sep + our_algorithm + '.txt'
                                # 1st: write header
                                os.system(
                                    ' '.join(
                                        ['echo', my_splitter + time.ctime() + my_splitter, '>>', statistics_file_path]))
                                # 2nd: run exec cmd
                                algorithm_path = our_exec_path + os.sep + our_algorithm
                                params_lst = map(str,
                                                 [algorithm_path,
                                                  data_set_path + os.sep + data_set_name,
                                                  eps, mu, t_num, num_pass, statistics_file_path, '> /dev/null 2>&1'])
                                cmd = ' '.join(params_lst)
                                time_out = 3600
                                # restrict visible GPUs to devices [4, 4+num_of_gpus)
                                my_env = os.environ.copy()
                                my_env["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, range(4, 4 + num_of_gpus)))
                                tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out,
                                                                                             env=my_env)
                                # check md5 for correctness, assuming result generated by ppSCAN
                                def check_result():
                                    # Compares md5 of this run's output against the
                                    # ground-truth result file for the same (eps, mu).
                                    tmp_lst = []
                                    gt_res_file_name = '-'.join(map(str, ['result', eps, mu])) + '.txt'
                                    res_file_path = os.sep.join(
                                        [data_set_path, data_set_name, 'scanxp-' + gt_res_file_name])
                                    tmp_tle_flag, tmp_info, tmp_correct_info = time_out_util.run_with_timeout(
                                        ' '.join(['md5sum', res_file_path]),
                                        timeout_sec=time_out)
                                    tmp_lst.append(tmp_info + tmp_correct_info)
                                    tmp_tle_flag, tmp_info, tmp_correct_info = time_out_util.run_with_timeout(
                                        ' '.join(['md5sum', os.sep.join(
                                            [data_set_path, data_set_name, gt_res_file_name])]), timeout_sec=time_out)
                                    tmp_lst.append(tmp_info + tmp_correct_info)
                                    if len(tmp_lst[0].split()) > 0 and len(tmp_lst[1].split()) > 0 and \
                                            tmp_lst[0].split()[0] == tmp_lst[1].split()[0]:
                                        return True
                                    else:
                                        return 'False\n' + '\n'.join(map(str, tmp_lst))
                                # 3rd: append outputs
                                write_split(statistics_file_path)
                                with open(statistics_file_path, 'a+') as ifs:
                                    ifs.write(correct_info)
                                    ifs.write('\nis_time_out:' + str(tle_flag))
                                    ifs.write('\nis_correct:' + str(check_result()) + '\n')
                                    ifs.write(my_splitter + time.ctime() + my_splitter)
                                    ifs.write('\n\n\n\n')
    one_round(is_rev_deg=True, num_of_gpus=1)
    one_round(is_rev_deg=False, num_of_gpus=1)
if __name__ == '__main__':
    # The experiment only runs on lccpu12 hosts; bail out everywhere else.
    host = socket.gethostname()
    if not host.startswith('lccpu12'):
        print('not supported')
    else:
        run_exp(env_tag=lccpu12_tag)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bccsp/pkcs11/impl_test.go | // +build pkcs11
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package pkcs11
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"fmt"
"hash"
"io/ioutil"
"math/big"
"net"
"os"
"strings"
"testing"
"time"
"github.com/hyperledger/fabric/bccsp"
"github.com/hyperledger/fabric/bccsp/signer"
"github.com/hyperledger/fabric/bccsp/sw"
"github.com/hyperledger/fabric/bccsp/utils"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/sha3"
)
// Shared suite state: testMain re-initializes these for every entry of its
// configuration matrix before invoking m.Run(), so each test run sees one
// consistent (keystore, BCCSP, config) triple.
var (
	currentKS         bccsp.KeyStore
	currentBCCSP      bccsp.BCCSP
	currentTestConfig testConfig
)
// testConfig describes one cell of the test matrix driven by testMain:
// the security level and hash family the BCCSP is instantiated with, plus
// the PKCS#11 soft-verify and immutable-key options.
type testConfig struct {
	securityLevel int
	hashFamily    string
	softVerify    bool // verify signatures in software instead of on the token
	immutable     bool // generate keys with the immutable attribute (softhsm-only here)
}
// TestMain delegates to testMain and exits with its status code; keeping the
// real logic in testMain lets its deferred cleanup run before the process exits.
func TestMain(m *testing.M) {
	exitCode := testMain(m)
	os.Exit(exitCode)
}
// testMain runs the whole suite once per testConfig entry, rebuilding the
// PKCS#11 BCCSP for each. It returns a process exit code (0 on success,
// -1 on any setup or test failure) instead of calling os.Exit directly so
// that the deferred keystore cleanup can run.
func testMain(m *testing.M) int {
	// File-based keystore in a throwaway temp dir, removed on return.
	tmpDir, err := ioutil.TempDir("", "pkcs11_ks")
	if err != nil {
		fmt.Printf("Failed to create keystore directory [%s]\n", err)
		return -1
	}
	defer os.RemoveAll(tmpDir)
	keyStore, err := sw.NewFileBasedKeyStore(nil, tmpDir, false)
	if err != nil {
		fmt.Printf("Failed initiliazing KeyStore [%s]\n", err)
		return -1
	}
	currentKS = keyStore
	lib, pin, label := FindPKCS11Lib()
	tests := []testConfig{
		{256, "SHA2", true, false},
		{256, "SHA3", false, false},
		{384, "SHA2", false, false},
		{384, "SHA3", false, false},
		{384, "SHA3", true, false},
	}
	// The immutable-key configuration is only exercised against softhsm --
	// NOTE(review): presumably because immutable keys cannot be cleaned up
	// on real HSMs; confirm before extending the matrix.
	if strings.Contains(lib, "softhsm") {
		tests = append(tests, []testConfig{
			{256, "SHA2", true, true},
		}...)
	}
	opts := PKCS11Opts{
		Library: lib,
		Label:   label,
		Pin:     pin,
	}
	for _, config := range tests {
		currentTestConfig = config
		opts.HashFamily = config.hashFamily
		opts.SecLevel = config.securityLevel
		opts.SoftVerify = config.softVerify
		opts.Immutable = config.immutable
		currentBCCSP, err = New(opts, keyStore)
		if err != nil {
			fmt.Printf("Failed initiliazing BCCSP at [%+v] \n%s\n", opts, err)
			return -1
		}
		// Run the full test binary once under this configuration; stop at
		// the first failing configuration.
		ret := m.Run()
		if ret != 0 {
			fmt.Printf("Failed testing at [%+v]\n", opts)
			return -1
		}
	}
	return 0
}
// TestNew checks the constructor's argument validation: a nil keystore and an
// empty library path must both be rejected with descriptive errors.
func TestNew(t *testing.T) {
	opts := PKCS11Opts{
		HashFamily: "SHA2",
		SecLevel:   256,
		SoftVerify: false,
		Library:    "lib",
		Label:      "ForFabric",
		Pin:        "98765432",
	}

	// Setup PKCS11 library and provide initial set of values
	lib, _, _ := FindPKCS11Lib()
	opts.Library = lib

	// Test for nil keystore
	_, err := New(opts, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid bccsp.KeyStore instance. It must be different from nil.")

	// Test for invalid PKCS11 loadLib
	opts.Library = ""
	_, err = New(opts, currentKS)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "pkcs11: library path not provided")
}
// TestFindPKCS11LibEnvVars checks that FindPKCS11Lib returns the library
// path, PIN and label configured via the PKCS11_* environment variables.
// The previous values are saved first and restored after the assertions.
func TestFindPKCS11LibEnvVars(t *testing.T) {
	const (
		dummyLib   = "/usr/lib/pkcs11"
		dummyPin   = "98765432"
		dummyLabel = "testing"
	)

	// Remember the current environment and install the dummy values.
	saved := map[string]string{
		"PKCS11_LIB":   os.Getenv("PKCS11_LIB"),
		"PKCS11_PIN":   os.Getenv("PKCS11_PIN"),
		"PKCS11_LABEL": os.Getenv("PKCS11_LABEL"),
	}
	os.Setenv("PKCS11_LIB", dummyLib)
	os.Setenv("PKCS11_PIN", dummyPin)
	os.Setenv("PKCS11_LABEL", dummyLabel)

	lib, pin, label := FindPKCS11Lib()
	assert.EqualValues(t, dummyLib, lib, "FindPKCS11Lib did not return expected library")
	assert.EqualValues(t, dummyPin, pin, "FindPKCS11Lib did not return expected pin")
	assert.EqualValues(t, dummyLabel, label, "FindPKCS11Lib did not return expected label")

	// Restore the original environment.
	for name, value := range saved {
		os.Setenv(name, value)
	}
}
// TestInvalidNewParameter drives New through a table of invalid option
// combinations (bad security level, unknown hash family, nil keystore) and
// expects every one of them to fail with a nil BCCSP instance.
func TestInvalidNewParameter(t *testing.T) {
	lib, pin, label := FindPKCS11Lib()
	baseOpts := PKCS11Opts{
		Library:    lib,
		Label:      label,
		Pin:        pin,
		SoftVerify: true,
	}

	cases := []struct {
		hashFamily string
		secLevel   int
		keyStore   bccsp.KeyStore
	}{
		{"SHA2", 0, currentKS},   // invalid security level
		{"SHA8", 256, currentKS}, // unknown hash family
		{"SHA2", 256, nil},       // nil keystore
		{"SHA3", 0, nil},         // invalid level and nil keystore
	}
	for _, tc := range cases {
		opts := baseOpts
		opts.HashFamily = tc.hashFamily
		opts.SecLevel = tc.secLevel
		r, err := New(opts, tc.keyStore)
		if err == nil {
			t.Fatal("Error should be different from nil in this case")
		}
		if r != nil {
			t.Fatal("Return value should be equal to nil in this case")
		}
	}
}
// TestInvalidSKI ensures GetKey rejects both a nil SKI and an SKI that does
// not correspond to any stored key.
func TestInvalidSKI(t *testing.T) {
	for _, ski := range [][]byte{nil, {0, 1, 2, 3, 4, 5, 6}} {
		k, err := currentBCCSP.GetKey(ski)
		if err == nil {
			t.Fatal("Error should be different from nil in this case")
		}
		if k != nil {
			t.Fatal("Return value should be equal to nil in this case")
		}
	}
}
// TestInvalidAltId builds a second BCCSP configured with a different AltId
// label and verifies that key lookup through it fails: keys stored under one
// AltId must not be visible under another.
func TestInvalidAltId(t *testing.T) {
	opts := PKCS11Opts{
		HashFamily: currentTestConfig.hashFamily,
		SecLevel:   currentTestConfig.securityLevel,
		SoftVerify: currentTestConfig.softVerify,
		Immutable:  currentTestConfig.immutable,
		AltId:      "ADifferentAltId",
		Library:    "lib",
		Label:      "ForFabric",
		Pin:        "98765432",
	}

	// Setup PKCS11 library and provide initial set of values
	lib, _, _ := FindPKCS11Lib()
	opts.Library = lib

	// Create temporary BCCSP set with an initial label
	testBCCSP, err := New(opts, currentKS)
	if err != nil {
		t.Fatalf("Failed initiliazing Test BCCSP at [%+v] \n%s\n", opts, err)
	}

	// Now, try to retrieve the key using a different label
	k, err := testBCCSP.GetKey([]byte{0, 1, 2, 3, 4, 5, 6})
	if err == nil {
		t.Fatal("Error should be different from nil in this case")
	}
	if k != nil {
		t.Fatal("Return value should be equal to nil in this case")
	}
}
// TestKeyGenECDSAOpts verifies that ECDSA key generation honors the requested
// curve (P-256 and P-384) and yields private, asymmetric keys.
//
// Fix: the P384 branch previously reported "P256 generated key in invalid.
// The curve must be P384." -- wrong curve name and a grammar slip; both
// failure messages are corrected below.
func TestKeyGenECDSAOpts(t *testing.T) {
	// Curve P256
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAP256KeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA P256 key [%s]", err)
	}
	if k == nil {
		t.Fatal("Failed generating ECDSA P256 key. Key must be different from nil")
	}
	if !k.Private() {
		t.Fatal("Failed generating ECDSA P256 key. Key should be private")
	}
	if k.Symmetric() {
		t.Fatal("Failed generating ECDSA P256 key. Key should be asymmetric")
	}

	// Reach into the concrete key type to confirm the curve actually used.
	ecdsaKey := k.(*ecdsaPrivateKey).pub
	if elliptic.P256() != ecdsaKey.pub.Curve {
		t.Fatal("P256 generated key is invalid. The curve must be P256.")
	}

	// Curve P384
	k, err = currentBCCSP.KeyGen(&bccsp.ECDSAP384KeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA P384 key [%s]", err)
	}
	if k == nil {
		t.Fatal("Failed generating ECDSA P384 key. Key must be different from nil")
	}
	if !k.Private() {
		t.Fatal("Failed generating ECDSA P384 key. Key should be private")
	}
	if k.Symmetric() {
		t.Fatal("Failed generating ECDSA P384 key. Key should be asymmetric")
	}

	ecdsaKey = k.(*ecdsaPrivateKey).pub
	if elliptic.P384() != ecdsaKey.pub.Curve {
		t.Fatal("P384 generated key is invalid. The curve must be P384.")
	}
}
func TestKeyGenRSAOpts(t *testing.T) {
// 1024
k, err := currentBCCSP.KeyGen(&bccsp.RSA1024KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA 1024 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA 1024 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA 1024 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA 1024 key. Key should be asymmetric")
}
// 2048
k, err = currentBCCSP.KeyGen(&bccsp.RSA2048KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating RSA 2048 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating RSA 2048 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating RSA 2048 key. Key should be private")
}
if k.Symmetric() {
t.Fatal("Failed generating RSA 2048 key. Key should be asymmetric")
}
}
func TestKeyGenAESOpts(t *testing.T) {
// AES 128
k, err := currentBCCSP.KeyGen(&bccsp.AES128KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 128 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 128 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 128 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 128 key. Key should be symmetric")
}
// AES 192
k, err = currentBCCSP.KeyGen(&bccsp.AES192KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 192 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 192 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 192 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 192 key. Key should be symmetric")
}
// AES 256
k, err = currentBCCSP.KeyGen(&bccsp.AES256KeyGenOpts{Temporary: false})
if err != nil {
t.Fatalf("Failed generating AES 256 key [%s]", err)
}
if k == nil {
t.Fatal("Failed generating AES 256 key. Key must be different from nil")
}
if !k.Private() {
t.Fatal("Failed generating AES 256 key. Key should be private")
}
if !k.Symmetric() {
t.Fatal("Failed generating AES 256 key. Key should be symmetric")
}
}
func TestHashOpts(t *testing.T) {
msg := []byte("abcd")
// SHA256
digest1, err := currentBCCSP.Hash(msg, &bccsp.SHA256Opts{})
if err != nil {
t.Fatalf("Failed computing SHA256 [%s]", err)
}
h := sha256.New()
h.Write(msg)
digest2 := h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA256 computed. [%x][%x]", digest1, digest2)
}
// SHA384
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA384Opts{})
if err != nil {
t.Fatalf("Failed computing SHA384 [%s]", err)
}
h = sha512.New384()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA384 computed. [%x][%x]", digest1, digest2)
}
// SHA3_256O
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_256Opts{})
if err != nil {
t.Fatalf("Failed computing SHA3_256 [%s]", err)
}
h = sha3.New256()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA3_256 computed. [%x][%x]", digest1, digest2)
}
// SHA3_384
digest1, err = currentBCCSP.Hash(msg, &bccsp.SHA3_384Opts{})
if err != nil {
t.Fatalf("Failed computing SHA3_384 [%s]", err)
}
h = sha3.New384()
h.Write(msg)
digest2 = h.Sum(nil)
if !bytes.Equal(digest1, digest2) {
t.Fatalf("Different SHA3_384 computed. [%x][%x]", digest1, digest2)
}
}
// TestECDSAKeyGenEphemeral generates a temporary ECDSA key and checks its
// basic properties: private, asymmetric, not exportable via Bytes(), and
// able to produce a public key.
func TestECDSAKeyGenEphemeral(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: true})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	if k == nil {
		t.Fatal("Failed generating ECDSA key. Key must be different from nil")
	}
	if !k.Private() {
		t.Fatal("Failed generating ECDSA key. Key should be private")
	}
	if k.Symmetric() {
		t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
	}
	// Private keys must not be exportable: Bytes() has to fail and return
	// no material.
	raw, err := k.Bytes()
	if err == nil {
		t.Fatal("Failed marshalling to bytes. Marshalling must fail.")
	}
	if len(raw) != 0 {
		t.Fatal("Failed marshalling to bytes. Output should be 0 bytes")
	}
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting corresponding public key [%s]", err)
	}
	if pk == nil {
		t.Fatal("Public key must be different from nil.")
	}
}
// TestECDSAPrivateKeySKI checks that a freshly generated ECDSA private key
// reports a non-empty subject key identifier.
func TestECDSAPrivateKeySKI(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	if ski := key.SKI(); len(ski) == 0 {
		t.Fatal("SKI not valid. Zero length.")
	}
}
// TestECDSAKeyGenNonEphemeral generates a persistent ECDSA key and checks it
// is non-nil, private and asymmetric.
func TestECDSAKeyGenNonEphemeral(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	switch {
	case err != nil:
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	case key == nil:
		t.Fatal("Failed generating ECDSA key. Key must be different from nil")
	case !key.Private():
		t.Fatal("Failed generating ECDSA key. Key should be private")
	case key.Symmetric():
		t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
	}
}
// TestECDSAGetKeyBySKI generates a persistent ECDSA key and then looks it up
// again by its SKI, verifying the retrieved key has the same properties and
// the identical SKI.
func TestECDSAGetKeyBySKI(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	k2, err := currentBCCSP.GetKey(k.SKI())
	if err != nil {
		t.Fatalf("Failed getting ECDSA key [%s]", err)
	}
	if k2 == nil {
		t.Fatal("Failed getting ECDSA key. Key must be different from nil")
	}
	if !k2.Private() {
		t.Fatal("Failed getting ECDSA key. Key should be private")
	}
	if k2.Symmetric() {
		t.Fatal("Failed getting ECDSA key. Key should be asymmetric")
	}

	// Check that the SKIs are the same
	if !bytes.Equal(k.SKI(), k2.SKI()) {
		t.Fatalf("SKIs are different [%x]!=[%x]", k.SKI(), k2.SKI())
	}
}
// TestECDSAPublicKeyFromPrivateKey derives the public half of a generated
// ECDSA private key and checks it is non-nil, public and asymmetric.
func TestECDSAPublicKeyFromPrivateKey(t *testing.T) {
	priv, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	pub, err := priv.PublicKey()
	switch {
	case err != nil:
		t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
	case pub == nil:
		t.Fatal("Failed getting public key from private ECDSA key. Key must be different from nil")
	case pub.Private():
		t.Fatal("Failed generating ECDSA key. Key should be public")
	case pub.Symmetric():
		t.Fatal("Failed generating ECDSA key. Key should be asymmetric")
	}
}
// TestECDSAPublicKeyBytes checks that the public half of an ECDSA key can be
// marshalled to a non-empty byte encoding.
func TestECDSAPublicKeyBytes(t *testing.T) {
	priv, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	pub, err := priv.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
	}
	encoded, err := pub.Bytes()
	if err != nil {
		t.Fatalf("Failed marshalling ECDSA public key [%s]", err)
	}
	if len(encoded) == 0 {
		t.Fatal("Failed marshalling ECDSA public key. Zero length")
	}
}
// TestECDSAPublicKeySKI checks that the public half of a generated ECDSA key
// reports a non-empty subject key identifier.
func TestECDSAPublicKeySKI(t *testing.T) {
	priv, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}
	pub, err := priv.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting public key from private ECDSA key [%s]", err)
	}
	if ski := pub.SKI(); len(ski) == 0 {
		t.Fatal("SKI not valid. Zero length.")
	}
}
// TestECDSASign signs a digest with a generated ECDSA key and checks a
// non-empty signature is produced; it also checks the input validation for a
// nil key and an empty digest.
func TestECDSASign(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}
	if len(signature) == 0 {
		t.Fatal("Failed generating ECDSA key. Signature must be different from nil")
	}

	// Argument validation: nil key and empty digest must be rejected.
	_, err = currentBCCSP.Sign(nil, digest, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid Key. It must not be nil")

	_, err = currentBCCSP.Sign(k, nil, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty")
}
// TestECDSAVerify signs a digest and verifies it three ways: with the private
// key handle, with the derived public key, and with the public key after a
// store/retrieve round trip through the keystore. It also checks argument
// validation for nil key, empty signature and empty digest.
func TestECDSAVerify(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}

	// Verify using the private key handle.
	valid, err := currentBCCSP.Verify(k, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}

	// Verify using the derived public key.
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting corresponding public key [%s]", err)
	}

	valid, err = currentBCCSP.Verify(pk, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}

	// Argument validation.
	_, err = currentBCCSP.Verify(nil, signature, digest, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid Key. It must not be nil")

	_, err = currentBCCSP.Verify(pk, nil, digest, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid signature. Cannot be empty")

	_, err = currentBCCSP.Verify(pk, signature, nil, nil)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "Invalid digest. Cannot be empty")

	// Import the exported public key
	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
	}

	// Store public key
	_, err = currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed storing corresponding public key [%s]", err)
	}

	pk2, err := currentBCCSP.GetKey(pk.SKI())
	if err != nil {
		t.Fatalf("Failed retrieving corresponding public key [%s]", err)
	}

	// Verify with the key fetched back from the keystore.
	valid, err = currentBCCSP.Verify(pk2, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}
}
// TestECDSAKeyImportFromExportedKey exports a generated ECDSA public key as
// raw (PKIX DER) bytes, re-imports it, and verifies a signature made with the
// original private key against the imported public key.
func TestECDSAKeyImportFromExportedKey(t *testing.T) {
	// Generate an ECDSA key
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	// Export the public key
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting ECDSA public key [%s]", err)
	}

	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
	}

	// Import the exported public key
	pk2, err := currentBCCSP.KeyImport(pkRaw, &bccsp.ECDSAPKIXPublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing ECDSA public key [%s]", err)
	}
	if pk2 == nil {
		t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
	}

	// Sign and verify with the imported public key
	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}

	valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}
}
// TestECDSAKeyImportFromECDSAPublicKey is like the raw-bytes import test, but
// first converts the exported DER bytes back into an *ecdsa.PublicKey and
// imports that Go object directly.
func TestECDSAKeyImportFromECDSAPublicKey(t *testing.T) {
	// Generate an ECDSA key
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	// Export the public key
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting ECDSA public key [%s]", err)
	}

	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
	}

	// Round-trip: DER bytes back to a Go public key object.
	pub, err := utils.DERToPublicKey(pkRaw)
	if err != nil {
		t.Fatalf("Failed converting raw to ecdsa.PublicKey [%s]", err)
	}

	// Import the ecdsa.PublicKey
	pk2, err := currentBCCSP.KeyImport(pub, &bccsp.ECDSAGoPublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing ECDSA public key [%s]", err)
	}
	if pk2 == nil {
		t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
	}

	// Sign and verify with the imported public key
	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}

	valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}
}
// TestKeyImportFromX509ECDSAPublicKey builds a self-signed X.509 certificate
// over a BCCSP-generated ECDSA key (signed through the bccsp crypto.Signer
// adapter), imports the certificate's public key, and verifies a signature
// made with the private key against the imported key.
func TestKeyImportFromX509ECDSAPublicKey(t *testing.T) {
	// Generate an ECDSA key
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	// Generate a self-signed certificate
	testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
	testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
	extraExtensionData := []byte("extra extension")
	commonName := "test.example.com"
	// Exercises a broad set of certificate fields; the exact values are not
	// asserted later -- only the embedded public key matters to this test.
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   commonName,
			Organization: []string{"Σ Acme Co"},
			Country:      []string{"US"},
			ExtraNames: []pkix.AttributeTypeAndValue{
				{
					Type:  []int{2, 5, 4, 42},
					Value: "Gopher",
				},
				// This should override the Country, above.
				{
					Type:  []int{2, 5, 4, 6},
					Value: "NL",
				},
			},
		},
		NotBefore:             time.Now().Add(-1 * time.Hour),
		NotAfter:              time.Now().Add(1 * time.Hour),
		SignatureAlgorithm:    x509.ECDSAWithSHA256,
		SubjectKeyId:          []byte{1, 2, 3, 4},
		KeyUsage:              x509.KeyUsageCertSign,
		ExtKeyUsage:           testExtKeyUsage,
		UnknownExtKeyUsage:    testUnknownExtKeyUsage,
		BasicConstraintsValid: true,
		IsCA:                  true,
		OCSPServer:            []string{"http://ocurrentBCCSP.example.com"},
		IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
		DNSNames:              []string{"test.example.com"},
		EmailAddresses:        []string{"[email protected]"},
		IPAddresses:           []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
		PolicyIdentifiers:     []asn1.ObjectIdentifier{[]int{1, 2, 3}},
		PermittedDNSDomains:   []string{".example.com", "example.com"},
		CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
		ExtraExtensions: []pkix.Extension{
			{
				Id:    []int{1, 2, 3, 4},
				Value: extraExtensionData,
			},
		},
	}

	// crypto.Signer backed by the BCCSP key; used to self-sign the cert.
	cryptoSigner, err := signer.New(currentBCCSP, k)
	if err != nil {
		t.Fatalf("Failed initializing CyrptoSigner [%s]", err)
	}

	// Export the public key
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting ECDSA public key [%s]", err)
	}

	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting ECDSA raw public key [%s]", err)
	}

	pub, err := utils.DERToPublicKey(pkRaw)
	if err != nil {
		t.Fatalf("Failed converting raw to ECDSA.PublicKey [%s]", err)
	}

	certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner)
	if err != nil {
		t.Fatalf("Failed generating self-signed certificate [%s]", err)
	}

	cert, err := utils.DERToX509Certificate(certRaw)
	if err != nil {
		t.Fatalf("Failed generating X509 certificate object from raw [%s]", err)
	}

	// Import the certificate's public key
	pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing ECDSA public key [%s]", err)
	}
	if pk2 == nil {
		t.Fatal("Failed importing ECDSA public key. Return BCCSP key cannot be nil.")
	}

	// Sign and verify with the imported public key
	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}

	valid, err := currentBCCSP.Verify(pk2, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}
}
// TestECDSASignatureEncoding feeds a series of malformed DER encodings
// (negative integers, leading-zero padding, non-minimal lengths) to the
// ASN.1 unmarshaller and expects each of them to be rejected.
func TestECDSASignatureEncoding(t *testing.T) {
	malformed := [][]byte{
		{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0xff, 0xf1},
		{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x02, 0x00, 0x01},
		{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x01},
		{0x30, 0x07, 0x02, 0x01, 0x8F, 0x02, 0x81, 0x01, 0x8F},
		{0x30, 0x0A, 0x02, 0x01, 0x8F, 0x02, 0x05, 0x00, 0x00, 0x00, 0x00, 0x8F},
	}
	for _, v := range malformed {
		_, err := asn1.Unmarshal(v, &utils.ECDSASignature{})
		if err == nil {
			t.Fatalf("Unmarshalling should fail for [% x]", v)
		}
		t.Logf("Unmarshalling correctly failed for [% x] [%s]", v, err)
	}
}
// TestECDSALowS checks the low-S normalization of ECDSA signatures: Sign must
// always emit S below half the curve order, and Verify must reject a
// signature whose S is above it.
func TestECDSALowS(t *testing.T) {
	// Ensure that signature with low-S are generated
	k, err := currentBCCSP.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating ECDSA key [%s]", err)
	}

	msg := []byte("Hello World")

	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	signature, err := currentBCCSP.Sign(k, digest, nil)
	if err != nil {
		t.Fatalf("Failed generating ECDSA signature [%s]", err)
	}

	R, S, err := utils.UnmarshalECDSASignature(signature)
	if err != nil {
		t.Fatalf("Failed unmarshalling signature [%s]", err)
	}

	if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) >= 0 {
		t.Fatal("Invalid signature. It must have low-S")
	}

	valid, err := currentBCCSP.Verify(k, signature, digest, nil)
	if err != nil {
		t.Fatalf("Failed verifying ECDSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying ECDSA signature. Signature not valid.")
	}

	// Ensure that signature with high-S are rejected.
	// Sign through the raw PKCS#11 path (which does not normalize) until the
	// token happens to produce a high-S signature.
	for {
		R, S, err = currentBCCSP.(*impl).signP11ECDSA(k.SKI(), digest)
		if err != nil {
			t.Fatalf("Failed generating signature [%s]", err)
		}

		if S.Cmp(utils.GetCurveHalfOrdersAt(k.(*ecdsaPrivateKey).pub.pub.Curve)) > 0 {
			break
		}
	}

	sig, err := utils.MarshalECDSASignature(R, S)
	if err != nil {
		t.Fatalf("Failing unmarshalling signature [%s]", err)
	}

	valid, err = currentBCCSP.Verify(k, sig, digest, nil)
	if err == nil {
		t.Fatal("Failed verifying ECDSA signature. It must fail for a signature with high-S")
	}
	if valid {
		t.Fatal("Failed verifying ECDSA signature. It must fail for a signature with high-S")
	}
}
// TestAESKeyGen generates a default (AES-256) key, checks it is private and
// symmetric, and confirms that asking a symmetric key for a public key fails.
func TestAESKeyGen(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	switch {
	case err != nil:
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	case key == nil:
		t.Fatal("Failed generating AES_256 key. Key must be different from nil")
	case !key.Private():
		t.Fatal("Failed generating AES_256 key. Key should be private")
	case !key.Symmetric():
		t.Fatal("Failed generating AES_256 key. Key should be symmetric")
	}

	// A symmetric key has no public half.
	pub, err := key.PublicKey()
	if err == nil {
		t.Fatal("Error should be different from nil in this case")
	}
	if pub != nil {
		t.Fatal("Return value should be equal to nil in this case")
	}
}
// TestAESEncrypt encrypts a short plaintext under AES-CBC/PKCS7 and checks a
// non-empty ciphertext comes back.
func TestAESEncrypt(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	}

	ciphertext, err := currentBCCSP.Encrypt(key, []byte("Hello World"), &bccsp.AESCBCPKCS7ModeOpts{})
	switch {
	case err != nil:
		t.Fatalf("Failed encrypting [%s]", err)
	case len(ciphertext) == 0:
		t.Fatal("Failed encrypting. Nil ciphertext")
	}
}
// TestAESDecrypt verifies an AES-CBC/PKCS7 encrypt/decrypt round trip:
// decrypting the ciphertext must reproduce the original plaintext.
func TestAESDecrypt(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	}
	msg := []byte("Hello World")
	ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed encrypting [%s]", err)
	}
	pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed decrypting [%s]", err)
	}
	// Bug fix: the original checked len(ct) here, which is always non-zero at
	// this point; the decrypted plaintext is what must be validated.
	if len(pt) == 0 {
		t.Fatal("Failed decrypting. Nil plaintext")
	}
	if !bytes.Equal(msg, pt) {
		t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
	}
}
// TestHMACTruncated256KeyDerivOverAES256Key derives a truncated-HMAC key from
// an AES-256 base key and verifies its attributes, that its raw bytes cannot
// be exported, and that it supports an encrypt/decrypt round trip.
func TestHMACTruncated256KeyDerivOverAES256Key(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	}
	hmacedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACTruncated256AESDeriveKeyOpts{Temporary: false, Arg: []byte{1}})
	if err != nil {
		t.Fatalf("Failed HMACing AES_256 key [%s]", err)
	}
	// Bug fix: the original nil-checked the base key k instead of the
	// freshly derived key.
	if hmacedKey == nil {
		t.Fatal("Failed HMACing AES_256 key. HMACed Key must be different from nil")
	}
	if !hmacedKey.Private() {
		t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private")
	}
	if !hmacedKey.Symmetric() {
		// Bug fix: this branch fires when the key is NOT symmetric, so the
		// message must demand a symmetric key (original said "asymmetric").
		t.Fatal("Failed HMACing AES_256 key. HMACed Key should be symmetric")
	}
	// Derived key material must not be exportable.
	raw, err := hmacedKey.Bytes()
	if err == nil {
		t.Fatal("Failed marshalling to bytes. Operation must be forbidden")
	}
	if len(raw) != 0 {
		t.Fatal("Failed marshalling to bytes. Operation must return 0 bytes")
	}
	msg := []byte("Hello World")
	ct, err := currentBCCSP.Encrypt(hmacedKey, msg, &bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed encrypting [%s]", err)
	}
	pt, err := currentBCCSP.Decrypt(hmacedKey, ct, bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed decrypting [%s]", err)
	}
	// Bug fix: validate the plaintext length, not the ciphertext.
	if len(pt) == 0 {
		t.Fatal("Failed decrypting. Nil plaintext")
	}
	if !bytes.Equal(msg, pt) {
		t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
	}
}
// TestHMACKeyDerivOverAES256Key derives an HMAC key from an AES-256 base key,
// verifies its attributes, and checks that its raw bytes can be exported.
func TestHMACKeyDerivOverAES256Key(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	}
	hmacedKey, err := currentBCCSP.KeyDeriv(k, &bccsp.HMACDeriveKeyOpts{Temporary: false, Arg: []byte{1}})
	if err != nil {
		t.Fatalf("Failed HMACing AES_256 key [%s]", err)
	}
	// Bug fix: the original nil-checked the base key k instead of the
	// freshly derived key.
	if hmacedKey == nil {
		t.Fatal("Failed HMACing AES_256 key. HMACed Key must be different from nil")
	}
	if !hmacedKey.Private() {
		t.Fatal("Failed HMACing AES_256 key. HMACed Key should be private")
	}
	if !hmacedKey.Symmetric() {
		// Bug fix: this branch fires when the key is NOT symmetric, so the
		// message must demand a symmetric key (original said "asymmetric").
		t.Fatal("Failed HMACing AES_256 key. HMACed Key should be symmetric")
	}
	// Unlike the truncated variant, this derived key is exportable.
	raw, err := hmacedKey.Bytes()
	if err != nil {
		t.Fatalf("Failed marshalling to bytes [%s]", err)
	}
	if len(raw) == 0 {
		t.Fatal("Failed marshalling to bytes. 0 bytes")
	}
}
// TestAES256KeyImport imports 32 random bytes as an AES-256 key, verifies the
// imported key's attributes, that it cannot be re-exported, and that it
// supports an encrypt/decrypt round trip.
func TestAES256KeyImport(t *testing.T) {
	raw, err := sw.GetRandomBytes(32)
	if err != nil {
		t.Fatalf("Failed generating AES key [%s]", err)
	}
	k, err := currentBCCSP.KeyImport(raw, &bccsp.AES256ImportKeyOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing AES_256 key [%s]", err)
	}
	if k == nil {
		t.Fatal("Failed importing AES_256 key. Imported Key must be different from nil")
	}
	// Message fix: this is a key-import test; the original copy-pasted
	// "Failed HMACing" messages from the key-derivation tests.
	if !k.Private() {
		t.Fatal("Failed importing AES_256 key. Imported Key should be private")
	}
	if !k.Symmetric() {
		// Message fix: an AES key must be symmetric (original said "asymmetric").
		t.Fatal("Failed importing AES_256 key. Imported Key should be symmetric")
	}
	// Imported key material must not be exportable again.
	raw, err = k.Bytes()
	if err == nil {
		t.Fatal("Failed marshalling to bytes. Marshalling must fail.")
	}
	if len(raw) != 0 {
		t.Fatal("Failed marshalling to bytes. Output should be 0 bytes")
	}
	msg := []byte("Hello World")
	ct, err := currentBCCSP.Encrypt(k, msg, &bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed encrypting [%s]", err)
	}
	pt, err := currentBCCSP.Decrypt(k, ct, bccsp.AESCBCPKCS7ModeOpts{})
	if err != nil {
		t.Fatalf("Failed decrypting [%s]", err)
	}
	// Bug fix: validate the plaintext length, not the ciphertext.
	if len(pt) == 0 {
		t.Fatal("Failed decrypting. Nil plaintext")
	}
	if !bytes.Equal(msg, pt) {
		t.Fatalf("Failed decrypting. Decrypted plaintext is different from the original. [%x][%x]", msg, pt)
	}
}
// TestAES256KeyImportBadPaths exercises the failure paths of AES-256 key
// import: nil material and material of the wrong length must both be rejected.
func TestAES256KeyImportBadPaths(t *testing.T) {
	if _, err := currentBCCSP.KeyImport(nil, &bccsp.AES256ImportKeyOpts{Temporary: false}); err == nil {
		t.Fatal("Failed importing key. Must fail on importing nil key")
	}
	if _, err := currentBCCSP.KeyImport([]byte{1}, &bccsp.AES256ImportKeyOpts{Temporary: false}); err == nil {
		t.Fatal("Failed importing key. Must fail on importing a key with an invalid length")
	}
}
// TestAES256KeyGenSKI generates an AES-256 key, fetches it back by its SKI,
// and checks that the fetched handle has the expected attributes and SKI.
func TestAES256KeyGenSKI(t *testing.T) {
	generated, err := currentBCCSP.KeyGen(&bccsp.AESKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating AES_256 key [%s]", err)
	}
	fetched, err := currentBCCSP.GetKey(generated.SKI())
	if err != nil {
		t.Fatalf("Failed getting AES_256 key [%s]", err)
	}
	switch {
	case fetched == nil:
		t.Fatal("Failed getting AES_256 key. Key must be different from nil")
	case !fetched.Private():
		t.Fatal("Failed getting AES_256 key. Key should be private")
	case !fetched.Symmetric():
		t.Fatal("Failed getting AES_256 key. Key should be symmetric")
	}
	// Check that the SKIs are the same
	if !bytes.Equal(generated.SKI(), fetched.SKI()) {
		t.Fatalf("SKIs are different [%x]!=[%x]", generated.SKI(), fetched.SKI())
	}
}
// TestSHA hashes random inputs of every length in [0, 100) through the BCCSP
// and cross-checks each digest against a reference implementation selected by
// the current test configuration (SHA2/SHA3 at security level 256/384).
func TestSHA(t *testing.T) {
	for i := 0; i < 100; i++ {
		b, err := sw.GetRandomBytes(i)
		if err != nil {
			t.Fatalf("Failed getting random bytes [%s]", err)
		}
		h1, err := currentBCCSP.Hash(b, &bccsp.SHAOpts{})
		if err != nil {
			t.Fatalf("Failed computing SHA [%s]", err)
		}
		// Pick the reference hash matching the configured family and level.
		var h hash.Hash
		switch currentTestConfig.hashFamily {
		case "SHA2":
			switch currentTestConfig.securityLevel {
			case 256:
				h = sha256.New()
			case 384:
				h = sha512.New384()
			default:
				t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
			}
		case "SHA3":
			switch currentTestConfig.securityLevel {
			case 256:
				h = sha3.New256()
			case 384:
				h = sha3.New384()
			default:
				t.Fatalf("Invalid security level [%d]", currentTestConfig.securityLevel)
			}
		default:
			t.Fatalf("Invalid hash family [%s]", currentTestConfig.hashFamily)
		}
		h.Write(b)
		h2 := h.Sum(nil)
		if !bytes.Equal(h1, h2) {
			// Typo fix in failure message: "Discrempancy" -> "Discrepancy".
			t.Fatalf("Discrepancy found in HASH result [%x], [%x]!=[%x]", b, h1, h2)
		}
	}
}
// TestRSAKeyGenEphemeral generates an ephemeral RSA key and checks the private
// key's attributes, the availability of its public half, and that the private
// material cannot be exported.
func TestRSAKeyGenEphemeral(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: true})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	switch {
	case key == nil:
		t.Fatal("Failed generating RSA key. Key must be different from nil")
	case !key.Private():
		t.Fatal("Failed generating RSA key. Key should be private")
	case key.Symmetric():
		t.Fatal("Failed generating RSA key. Key should be asymmetric")
	}

	pubKey, err := key.PublicKey()
	if err != nil {
		t.Fatalf("Failed generating RSA corresponding public key [%s]", err)
	}
	if pubKey == nil {
		t.Fatal("PK must be different from nil")
	}

	// Private key material must never be exportable.
	raw, err := key.Bytes()
	if err == nil {
		t.Fatal("Secret keys cannot be exported. It must fail in this case")
	}
	if len(raw) != 0 {
		t.Fatal("Secret keys cannot be exported. It must be nil")
	}
}
// TestRSAPrivateKeySKI checks that a freshly generated RSA private key exposes
// a non-empty subject key identifier.
func TestRSAPrivateKeySKI(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	if ski := key.SKI(); len(ski) == 0 {
		t.Fatal("SKI not valid. Zero length.")
	}
}
// TestRSAKeyGenNonEphemeral generates a persistent RSA key and checks that it
// is non-nil, private, and asymmetric.
func TestRSAKeyGenNonEphemeral(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	switch {
	case key == nil:
		t.Fatal("Failed generating RSA key. Key must be different from nil")
	case !key.Private():
		t.Fatal("Failed generating RSA key. Key should be private")
	case key.Symmetric():
		t.Fatal("Failed generating RSA key. Key should be asymmetric")
	}
}
// TestRSAGetKeyBySKI generates an RSA key, fetches it back by its SKI, and
// checks that the fetched handle has the expected attributes and SKI.
func TestRSAGetKeyBySKI(t *testing.T) {
	generated, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	fetched, err := currentBCCSP.GetKey(generated.SKI())
	if err != nil {
		t.Fatalf("Failed getting RSA key [%s]", err)
	}
	switch {
	case fetched == nil:
		t.Fatal("Failed getting RSA key. Key must be different from nil")
	case !fetched.Private():
		t.Fatal("Failed getting RSA key. Key should be private")
	case fetched.Symmetric():
		t.Fatal("Failed getting RSA key. Key should be asymmetric")
	}
	// Check that the SKIs are the same
	if !bytes.Equal(generated.SKI(), fetched.SKI()) {
		t.Fatalf("SKIs are different [%x]!=[%x]", generated.SKI(), fetched.SKI())
	}
}
// TestRSAPublicKeyFromPrivateKey derives the public key from a generated RSA
// private key and checks that it is non-nil, public, and asymmetric.
func TestRSAPublicKeyFromPrivateKey(t *testing.T) {
	privKey, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	pubKey, err := privKey.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting public key from private RSA key [%s]", err)
	}
	switch {
	case pubKey == nil:
		t.Fatal("Failed getting public key from private RSA key. Key must be different from nil")
	case pubKey.Private():
		t.Fatal("Failed generating RSA key. Key should be public")
	case pubKey.Symmetric():
		t.Fatal("Failed generating RSA key. Key should be asymmetric")
	}
}
// TestRSAPublicKeyBytes checks that the public half of a generated RSA key can
// be marshalled to a non-empty byte representation.
func TestRSAPublicKeyBytes(t *testing.T) {
	privKey, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	pubKey, err := privKey.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting public key from private RSA key [%s]", err)
	}
	encoded, err := pubKey.Bytes()
	if err != nil {
		t.Fatalf("Failed marshalling RSA public key [%s]", err)
	}
	if len(encoded) == 0 {
		t.Fatal("Failed marshalling RSA public key. Zero length")
	}
}
// TestRSAPublicKeySKI checks that the public half of a generated RSA key
// exposes a non-empty subject key identifier.
func TestRSAPublicKeySKI(t *testing.T) {
	privKey, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	pubKey, err := privKey.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting public key from private RSA key [%s]", err)
	}
	if ski := pubKey.SKI(); len(ski) == 0 {
		t.Fatal("SKI not valid. Zero length.")
	}
}
// TestRSASign signs the digest of a short message with a generated RSA key
// (RSA-PSS, 32-byte salt) and checks that a non-empty signature is produced.
func TestRSASign(t *testing.T) {
	key, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}

	digest, err := currentBCCSP.Hash([]byte("Hello World"), &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}

	sig, err := currentBCCSP.Sign(key, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed generating RSA signature [%s]", err)
	}
	if len(sig) == 0 {
		t.Fatal("Failed generating RSA key. Signature must be different from nil")
	}
}
// TestRSAVerify signs a digest with a generated RSA key (RSA-PSS) and then
// verifies the signature three ways: with the private-key handle, with the
// derived public key, and with the public key after a store/fetch round trip
// through the key store. All three verifications must succeed.
func TestRSAVerify(t *testing.T) {
	k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	msg := []byte("Hello World")
	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}
	// RSA-PSS with a fixed 32-byte salt and the configured hash.
	signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed generating RSA signature [%s]", err)
	}
	// Verify via the private-key handle.
	valid, err := currentBCCSP.Verify(k, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed verifying RSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying RSA signature. Signature not valid.")
	}
	// Verify via the derived public key.
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting corresponding public key [%s]", err)
	}
	valid, err = currentBCCSP.Verify(pk, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed verifying RSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying RSA signature. Signature not valid.")
	}
	// Store public key, fetch it back by SKI, and verify once more.
	err = currentKS.StoreKey(pk)
	if err != nil {
		t.Fatalf("Failed storing corresponding public key [%s]", err)
	}
	pk2, err := currentKS.GetKey(pk.SKI())
	if err != nil {
		t.Fatalf("Failed retrieving corresponding public key [%s]", err)
	}
	valid, err = currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed verifying RSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying RSA signature. Signature not valid.")
	}
}
// TestRSAKeyImportFromRSAPublicKey exports a generated RSA public key to DER,
// converts it back to an rsa.PublicKey, imports that into the BCCSP, and
// finally checks that a signature made with the original private key verifies
// against the imported public key.
func TestRSAKeyImportFromRSAPublicKey(t *testing.T) {
	// Generate an RSA key
	k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	// Export the public key
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting RSA public key [%s]", err)
	}
	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting RSA raw public key [%s]", err)
	}
	pub, err := utils.DERToPublicKey(pkRaw)
	if err != nil {
		t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err)
	}
	// Import the RSA.PublicKey
	pk2, err := currentBCCSP.KeyImport(pub, &bccsp.RSAGoPublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing RSA public key [%s]", err)
	}
	if pk2 == nil {
		t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.")
	}
	// Sign and verify with the imported public key
	msg := []byte("Hello World")
	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}
	signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed generating RSA signature [%s]", err)
	}
	valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed verifying RSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying RSA signature. Signature not valid.")
	}
}
// TestKeyImportFromX509RSAPublicKey creates a self-signed X.509 certificate
// over a BCCSP-generated RSA key (signed via the crypto.Signer adapter),
// imports the certificate's public key into the BCCSP, and checks that a
// signature made with the original private key verifies against it.
func TestKeyImportFromX509RSAPublicKey(t *testing.T) {
	// Generate an RSA key
	k, err := currentBCCSP.KeyGen(&bccsp.RSAKeyGenOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed generating RSA key [%s]", err)
	}
	// Generate a self-signed certificate
	testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
	testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
	extraExtensionData := []byte("extra extension")
	commonName := "test.example.com"
	template := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   commonName,
			Organization: []string{"Σ Acme Co"},
			Country:      []string{"US"},
			ExtraNames: []pkix.AttributeTypeAndValue{
				{
					Type:  []int{2, 5, 4, 42},
					Value: "Gopher",
				},
				// This should override the Country, above.
				{
					Type:  []int{2, 5, 4, 6},
					Value: "NL",
				},
			},
		},
		NotBefore:             time.Now().Add(-1 * time.Hour),
		NotAfter:              time.Now().Add(1 * time.Hour),
		SignatureAlgorithm:    x509.SHA256WithRSA,
		SubjectKeyId:          []byte{1, 2, 3, 4},
		KeyUsage:              x509.KeyUsageCertSign,
		ExtKeyUsage:           testExtKeyUsage,
		UnknownExtKeyUsage:    testUnknownExtKeyUsage,
		BasicConstraintsValid: true,
		IsCA:                  true,
		OCSPServer:            []string{"http://ocurrentBCCSP.example.com"},
		IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
		DNSNames:              []string{"test.example.com"},
		EmailAddresses:        []string{"[email protected]"},
		IPAddresses:           []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
		PolicyIdentifiers:     []asn1.ObjectIdentifier{[]int{1, 2, 3}},
		PermittedDNSDomains:   []string{".example.com", "example.com"},
		CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
		ExtraExtensions: []pkix.Extension{
			{
				Id:    []int{1, 2, 3, 4},
				Value: extraExtensionData,
			},
		},
	}
	cryptoSigner, err := signer.New(currentBCCSP, k)
	if err != nil {
		// Typo fix in failure message: "CyrptoSigner" -> "CryptoSigner".
		t.Fatalf("Failed initializing CryptoSigner [%s]", err)
	}
	// Export the public key
	pk, err := k.PublicKey()
	if err != nil {
		t.Fatalf("Failed getting RSA public key [%s]", err)
	}
	pkRaw, err := pk.Bytes()
	if err != nil {
		t.Fatalf("Failed getting RSA raw public key [%s]", err)
	}
	pub, err := utils.DERToPublicKey(pkRaw)
	if err != nil {
		t.Fatalf("Failed converting raw to RSA.PublicKey [%s]", err)
	}
	certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, cryptoSigner)
	if err != nil {
		t.Fatalf("Failed generating self-signed certificate [%s]", err)
	}
	cert, err := utils.DERToX509Certificate(certRaw)
	if err != nil {
		t.Fatalf("Failed generating X509 certificate object from raw [%s]", err)
	}
	// Import the certificate's public key
	pk2, err := currentBCCSP.KeyImport(cert, &bccsp.X509PublicKeyImportOpts{Temporary: false})
	if err != nil {
		t.Fatalf("Failed importing RSA public key [%s]", err)
	}
	if pk2 == nil {
		t.Fatal("Failed importing RSA public key. Return BCCSP key cannot be nil.")
	}
	// Sign and verify with the imported public key
	msg := []byte("Hello World")
	digest, err := currentBCCSP.Hash(msg, &bccsp.SHAOpts{})
	if err != nil {
		t.Fatalf("Failed computing HASH [%s]", err)
	}
	signature, err := currentBCCSP.Sign(k, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed generating RSA signature [%s]", err)
	}
	valid, err := currentBCCSP.Verify(pk2, signature, digest, &rsa.PSSOptions{SaltLength: 32, Hash: getCryptoHashIndex(t)})
	if err != nil {
		t.Fatalf("Failed verifying RSA signature [%s]", err)
	}
	if !valid {
		t.Fatal("Failed verifying RSA signature. Signature not valid.")
	}
}
// getCryptoHashIndex maps the current test configuration (hash family and
// security level) onto the matching crypto.Hash identifier, failing the test
// for any unsupported combination.
func getCryptoHashIndex(t *testing.T) crypto.Hash {
	family := currentTestConfig.hashFamily
	level := currentTestConfig.securityLevel
	switch family {
	case "SHA2":
		switch level {
		case 256:
			return crypto.SHA256
		case 384:
			return crypto.SHA384
		}
		t.Fatalf("Invalid security level [%d]", level)
	case "SHA3":
		switch level {
		case 256:
			return crypto.SHA3_256
		case 384:
			return crypto.SHA3_384
		}
		t.Fatalf("Invalid security level [%d]", level)
	default:
		t.Fatalf("Invalid hash family [%s]", family)
	}
	// Not reached: Fatalf stops the test; the compiler still requires a return.
	return crypto.SHA3_256
}
| [
"\"PKCS11_LIB\"",
"\"PKCS11_PIN\"",
"\"PKCS11_LABEL\""
]
| []
| [
"PKCS11_PIN",
"PKCS11_LIB",
"PKCS11_LABEL"
]
| [] | ["PKCS11_PIN", "PKCS11_LIB", "PKCS11_LABEL"] | go | 3 | 0 | |
tests/integration/awslambda/test_lambda.py | import base64
import json
import logging
import os
import re
import shutil
import time
from datetime import datetime
from io import BytesIO
import pytest
from localstack import config
from localstack.constants import LAMBDA_TEST_ROLE, TEST_AWS_ACCOUNT_ID
from localstack.services.awslambda import lambda_api
from localstack.services.awslambda.lambda_api import (
LAMBDA_DEFAULT_HANDLER,
get_lambda_policy_name,
use_docker,
)
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE31,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_NODEJS10X,
LAMBDA_RUNTIME_NODEJS12X,
LAMBDA_RUNTIME_NODEJS14X,
LAMBDA_RUNTIME_PROVIDED,
LAMBDA_RUNTIME_PROVIDED_AL2,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_PYTHON37,
LAMBDA_RUNTIME_PYTHON38,
LAMBDA_RUNTIME_PYTHON39,
LAMBDA_RUNTIME_RUBY27,
)
from localstack.services.install import (
GO_RUNTIME_VERSION,
INSTALL_PATH_LOCALSTACK_FAT_JAR,
TEST_LAMBDA_JAVA,
download_and_extract,
)
from localstack.utils import testutil
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
cp_r,
get_arch,
get_os,
load_file,
mkdir,
new_tmp_dir,
retry,
run_safe,
save_file,
short_uid,
to_bytes,
to_str,
unzip,
)
from localstack.utils.generic.wait_utils import wait_until
from localstack.utils.testutil import (
check_expected_lambda_log_events_length,
create_lambda_archive,
get_lambda_log_events,
)
from .functions import lambda_integration
# Paths to the Lambda handler fixtures used throughout these tests.
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_integration.py")
TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, "functions", "lambda_echo.py")
TEST_LAMBDA_PYTHON_VERSION = os.path.join(THIS_FOLDER, "functions", "lambda_python_version.py")
TEST_LAMBDA_PYTHON3 = os.path.join(THIS_FOLDER, "functions", "lambda_python3.py")
TEST_LAMBDA_INTEGRATION_NODEJS = os.path.join(THIS_FOLDER, "functions", "lambda_integration.js")
TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, "functions", "lambda_handler.js")
TEST_LAMBDA_GOLANG_ZIP = os.path.join(THIS_FOLDER, "functions", "golang", "handler.zip")
TEST_LAMBDA_RUBY = os.path.join(THIS_FOLDER, "functions", "lambda_integration.rb")
TEST_LAMBDA_DOTNETCORE2 = os.path.join(THIS_FOLDER, "functions", "dotnetcore2", "dotnetcore2.zip")
TEST_LAMBDA_DOTNETCORE31 = os.path.join(
    THIS_FOLDER, "functions", "dotnetcore31", "dotnetcore31.zip"
)
TEST_LAMBDA_CUSTOM_RUNTIME = os.path.join(THIS_FOLDER, "functions", "custom-runtime")
TEST_LAMBDA_JAVA_WITH_LIB = os.path.join(
    THIS_FOLDER, "functions", "java", "lambda_echo", "lambda-function-with-lib-0.0.1.jar"
)
TEST_LAMBDA_JAVA_MULTIPLE_HANDLERS = os.path.join(
    THIS_FOLDER,
    "functions",
    "java",
    "lambda_multiple_handlers",
    "build",
    "distributions",
    "lambda-function-with-multiple-handlers.zip",
)
TEST_LAMBDA_ENV = os.path.join(THIS_FOLDER, "functions", "lambda_environment.py")
TEST_LAMBDA_SEND_MESSAGE_FILE = os.path.join(THIS_FOLDER, "functions", "lambda_send_message.py")
TEST_LAMBDA_PUT_ITEM_FILE = os.path.join(THIS_FOLDER, "functions", "lambda_put_item.py")
TEST_LAMBDA_START_EXECUTION_FILE = os.path.join(
    THIS_FOLDER, "functions", "lambda_start_execution.py"
)
TEST_LAMBDA_FUNCTION_PREFIX = "lambda-function"
# NOTE: "awslamba" (sic) matches the actual upstream repository name — do not "fix" it.
TEST_GOLANG_LAMBDA_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/example-handler-{os}-{arch}.tar.gz"
# Third-party libraries bundled into Lambda archives for the integration handlers.
TEST_LAMBDA_LIBS = [
    "localstack_client",
    "requests",
    "psutil",
    "urllib3",
    "chardet",
    "certifi",
    "idna",
    "pip",
    "dns",
]
# Runtimes to parametrize over: the full matrix when Docker executors are
# available, otherwise a single local-executor-compatible runtime each.
PYTHON_TEST_RUNTIMES = (
    [
        LAMBDA_RUNTIME_PYTHON36,
        LAMBDA_RUNTIME_PYTHON37,
        LAMBDA_RUNTIME_PYTHON38,
        LAMBDA_RUNTIME_PYTHON39,
    ]
    if use_docker()
    else [LAMBDA_RUNTIME_PYTHON38]
)
NODE_TEST_RUNTIMES = (
    [
        LAMBDA_RUNTIME_NODEJS10X,
        LAMBDA_RUNTIME_NODEJS12X,
        LAMBDA_RUNTIME_NODEJS14X,
    ]
    if use_docker()
    else [LAMBDA_RUNTIME_NODEJS14X]
)
JAVA_TEST_RUNTIMES = (
    [
        LAMBDA_RUNTIME_JAVA8,
        LAMBDA_RUNTIME_JAVA8_AL2,
        LAMBDA_RUNTIME_JAVA11,
    ]
    if use_docker()
    else [LAMBDA_RUNTIME_JAVA11]
)
PROVIDED_TEST_RUNTIMES = [
    LAMBDA_RUNTIME_PROVIDED,
    # TODO remove skip once we use correct images
    pytest.param(
        LAMBDA_RUNTIME_PROVIDED_AL2, marks=pytest.mark.skip("curl missing in provided.al2 image")
    ),
]
@pytest.fixture
def check_lambda_logs(logs_client):
    """Fixture returning a checker that asserts expected lines appear in a Lambda's logs.

    Entries containing ".*" are treated as regular expressions (matched with
    re.match); all other entries must be present verbatim.
    """

    def _check_logs(func_name, expected_lines=None):
        expected = expected_lines or []
        messages = [event["message"] for event in get_lambda_logs(func_name, logs_client=logs_client)]
        for line in expected:
            if ".*" in line and any(re.match(line, message) for message in messages):
                continue
            assert line in messages

    return _check_logs
def get_lambda_logs(func_name, logs_client=None):
    """Return the log events of the most recently created log stream of the given Lambda."""
    client = logs_client or aws_stack.create_external_boto_client("logs")
    group_name = "/aws/lambda/%s" % func_name
    streams = client.describe_log_streams(logGroupName=group_name)["logStreams"]
    # Pick the newest stream by creation time (equivalent to sorting descending
    # and taking the first element).
    newest = max(streams, key=lambda stream: stream["creationTime"])
    return client.get_log_events(
        logGroupName=group_name, logStreamName=newest["logStreamName"]
    )["events"]
# API only functions (no lambda execution itself)
class TestLambdaAPI:
def test_create_lambda_function(self, lambda_client):
"""Basic test that creates and deletes a Lambda function"""
func_name = f"lambda_func-{short_uid()}"
kms_key_arn = f"arn:{aws_stack.get_partition()}:kms:{aws_stack.get_region()}:{TEST_AWS_ACCOUNT_ID}:key11"
vpc_config = {
"SubnetIds": ["subnet-123456789"],
"SecurityGroupIds": ["sg-123456789"],
}
tags = {"env": "testing"}
kwargs = {
"FunctionName": func_name,
"Runtime": LAMBDA_RUNTIME_PYTHON37,
"Handler": LAMBDA_DEFAULT_HANDLER,
"Role": LAMBDA_TEST_ROLE,
"KMSKeyArn": kms_key_arn,
"Code": {
"ZipFile": create_lambda_archive(
load_file(TEST_LAMBDA_PYTHON_ECHO), get_content=True
)
},
"Timeout": 3,
"VpcConfig": vpc_config,
"Tags": tags,
"Environment": {"Variables": {"foo": "bar"}},
}
result = lambda_client.create_function(**kwargs)
function_arn = result["FunctionArn"]
assert testutil.response_arn_matches_partition(lambda_client, function_arn)
partial_function_arn = ":".join(function_arn.split(":")[3:])
# Get function by Name, ARN and partial ARN
for func_ref in [func_name, function_arn, partial_function_arn]:
rs = lambda_client.get_function(FunctionName=func_ref)
assert rs["Configuration"].get("KMSKeyArn", "") == kms_key_arn
assert rs["Configuration"].get("VpcConfig", {}) == vpc_config
assert rs["Tags"] == tags
# clean up
lambda_client.delete_function(FunctionName=func_name)
with pytest.raises(Exception) as exc:
lambda_client.delete_function(FunctionName=func_name)
assert "ResourceNotFoundException" in str(exc)
def test_add_lambda_permission(self, lambda_client, iam_client, create_lambda_function):
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
# create lambda permission
action = "lambda:InvokeFunction"
sid = "s3"
principal = "s3.amazonaws.com"
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
assert "Statement" in resp
# fetch lambda policy
policy = lambda_client.get_policy(FunctionName=function_name)["Policy"]
assert isinstance(policy, str)
policy = json.loads(to_str(policy))
assert action == policy["Statement"][0]["Action"]
assert sid == policy["Statement"][0]["Sid"]
assert lambda_api.func_arn(function_name) == policy["Statement"][0]["Resource"]
assert principal == policy["Statement"][0]["Principal"]["Service"]
assert (
aws_stack.s3_bucket_arn("test-bucket")
== policy["Statement"][0]["Condition"]["ArnLike"]["AWS:SourceArn"]
)
# fetch IAM policy
policies = iam_client.list_policies(Scope="Local", MaxItems=500)["Policies"]
policy_name = get_lambda_policy_name(function_name)
matching = [p for p in policies if p["PolicyName"] == policy_name]
assert len(matching) == 1
assert ":policy/" in matching[0]["Arn"]
# remove permission that we just added
resp = lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid,
Qualifier="qual1",
RevisionId="r1",
)
assert 200 == resp["ResponseMetadata"]["HTTPStatusCode"]
def test_add_lambda_multiple_permission(
self, iam_client, lambda_client, create_lambda_function
):
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
# create lambda permissions
action = "lambda:InvokeFunction"
principal = "s3.amazonaws.com"
statement_ids = ["s4", "s5"]
for sid in statement_ids:
resp = lambda_client.add_permission(
FunctionName=function_name,
Action=action,
StatementId=sid,
Principal=principal,
SourceArn=aws_stack.s3_bucket_arn("test-bucket"),
)
assert "Statement" in resp
# fetch IAM policy
policies = iam_client.list_policies(Scope="Local", MaxItems=500)["Policies"]
policy_name = get_lambda_policy_name(function_name)
matching = [p for p in policies if p["PolicyName"] == policy_name]
assert 1 == len(matching)
assert ":policy/" in matching[0]["Arn"]
# validate both statements
policy = matching[0]
versions = iam_client.list_policy_versions(PolicyArn=policy["Arn"])["Versions"]
assert 1 == len(versions)
statements = versions[0]["Document"]["Statement"]
for i in range(len(statement_ids)):
assert action == statements[i]["Action"]
assert lambda_api.func_arn(function_name) == statements[i]["Resource"]
assert principal == statements[i]["Principal"]["Service"]
assert (
aws_stack.s3_bucket_arn("test-bucket")
== statements[i]["Condition"]["ArnLike"]["AWS:SourceArn"]
)
# check statement_ids in reverse order
assert statement_ids[abs(i - 1)] == statements[i]["Sid"]
# remove permission that we just added
resp = lambda_client.remove_permission(
FunctionName=function_name,
StatementId=sid,
Qualifier="qual1",
RevisionId="r1",
)
assert 200 == resp["ResponseMetadata"]["HTTPStatusCode"]
def test_lambda_asynchronous_invocations(self, lambda_client, create_lambda_function):
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
# adding event invoke config
response = lambda_client.put_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=123,
MaximumEventAgeInSeconds=123,
DestinationConfig={
"OnSuccess": {"Destination": function_name},
"OnFailure": {"Destination": function_name},
},
)
destination_config = {
"OnSuccess": {"Destination": function_name},
"OnFailure": {"Destination": function_name},
}
# checking for parameter configuration
assert 123 == response["MaximumRetryAttempts"]
assert 123 == response["MaximumEventAgeInSeconds"]
assert destination_config == response["DestinationConfig"]
# over writing event invoke config
response = lambda_client.put_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=123,
DestinationConfig={
"OnSuccess": {"Destination": function_name},
"OnFailure": {"Destination": function_name},
},
)
# checking if 'MaximumEventAgeInSeconds' is removed
assert "MaximumEventAgeInSeconds" not in response
assert isinstance(response["LastModified"], datetime)
# updating event invoke config
response = lambda_client.update_function_event_invoke_config(
FunctionName=function_name,
MaximumRetryAttempts=111,
)
# checking for updated and existing configuration
assert 111 == response["MaximumRetryAttempts"]
assert destination_config == response["DestinationConfig"]
# clean up
lambda_client.delete_function_event_invoke_config(FunctionName=function_name)
def test_function_concurrency(self, lambda_client, create_lambda_function):
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
response = lambda_client.put_function_concurrency(
FunctionName=function_name, ReservedConcurrentExecutions=123
)
assert "ReservedConcurrentExecutions" in response
response = lambda_client.get_function_concurrency(FunctionName=function_name)
assert "ReservedConcurrentExecutions" in response
response = lambda_client.delete_function_concurrency(FunctionName=function_name)
assert "ReservedConcurrentExecutions" not in response
def test_function_code_signing_config(self, lambda_client, create_lambda_function):
function_name = f"lambda_func-{short_uid()}"
create_lambda_function(
handler_file=TEST_LAMBDA_PYTHON_ECHO,
func_name=function_name,
runtime=LAMBDA_RUNTIME_PYTHON36,
)
response = lambda_client.create_code_signing_config(
Description="Testing CodeSigning Config",
AllowedPublishers={
"SigningProfileVersionArns": [
"arn:aws:signer:%s:000000000000:/signing-profiles/test"
% aws_stack.get_region(),
]
},
CodeSigningPolicies={"UntrustedArtifactOnDeployment": "Enforce"},
)
assert "Description" in response["CodeSigningConfig"]
assert "SigningProfileVersionArns" in response["CodeSigningConfig"]["AllowedPublishers"]
assert (
"UntrustedArtifactOnDeployment" in response["CodeSigningConfig"]["CodeSigningPolicies"]
)
code_signing_arn = response["CodeSigningConfig"]["CodeSigningConfigArn"]
response = lambda_client.update_code_signing_config(
CodeSigningConfigArn=code_signing_arn,
CodeSigningPolicies={"UntrustedArtifactOnDeployment": "Warn"},
)
assert (
"Warn"
== response["CodeSigningConfig"]["CodeSigningPolicies"]["UntrustedArtifactOnDeployment"]
)
response = lambda_client.get_code_signing_config(CodeSigningConfigArn=code_signing_arn)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
response = lambda_client.put_function_code_signing_config(
CodeSigningConfigArn=code_signing_arn, FunctionName=function_name
)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
response = lambda_client.get_function_code_signing_config(FunctionName=function_name)
assert 200 == response["ResponseMetadata"]["HTTPStatusCode"]
assert code_signing_arn == response["CodeSigningConfigArn"]
assert function_name == response["FunctionName"]
response = lambda_client.delete_function_code_signing_config(FunctionName=function_name)
assert 204 == response["ResponseMetadata"]["HTTPStatusCode"]
response = lambda_client.delete_code_signing_config(CodeSigningConfigArn=code_signing_arn)
assert 204 == response["ResponseMetadata"]["HTTPStatusCode"]
def create_multiple_lambda_permissions(self, lambda_client, iam_client, create_lambda_function):
    """Add two resource-based permissions (logs + kinesis principals) to one
    function and assert each add_permission call returns a policy statement.

    NOTE(review): the name lacks a ``test_`` prefix, so pytest will not collect
    it as a test — confirm whether that is intentional before renaming.
    """
    role_name = f"role-{short_uid()}"
    function_name = f"test-function-{short_uid()}"
    assume_policy_document = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": "sts:AssumeRole",
                "Principal": {"Service": "lambda.amazonaws.com"},
            }
        ],
    }
    # Role is created for the scenario; it is not passed to the function below.
    iam_client.create_role(
        RoleName=role_name,
        AssumeRolePolicyDocument=json.dumps(assume_policy_document),
    )
    # Fix: the keyword was misspelled `funct_name`; every other call site in this
    # file uses `func_name`, so the function name was silently dropped before.
    create_lambda_function(
        func_name=function_name,
        runtime=LAMBDA_RUNTIME_PYTHON37,
        libs=TEST_LAMBDA_LIBS,
    )
    action = "lambda:InvokeFunction"
    # First permission: allow CloudWatch Logs to invoke the function.
    sid = "logs"
    resp = lambda_client.add_permission(
        FunctionName=function_name,
        Action=action,
        StatementId=sid,
        Principal="logs.amazonaws.com",
    )
    assert "Statement" in resp
    # Second permission: allow Kinesis to invoke the same function.
    sid = "kinesis"
    resp = lambda_client.add_permission(
        FunctionName=function_name,
        Action=action,
        StatementId=sid,
        Principal="kinesis.amazonaws.com",
    )
    assert "Statement" in resp
class TestLambdaBaseFeatures:
    """Cross-runtime Lambda behaviors: dead-letter queues, invoke
    destinations, and large-payload handling."""

    def test_dead_letter_queue(
        self, lambda_client, create_lambda_function, sqs_client, sqs_create_queue, sqs_queue_arn
    ):
        """A failing async invocation must land on the configured SQS DLQ;
        after clearing the DLQ config, errors surface in the sync response."""
        # create DLQ and Lambda function
        queue_name = f"test-{short_uid()}"
        lambda_name = f"test-{short_uid()}"
        queue_url = sqs_create_queue(QueueName=queue_name)
        queue_arn = sqs_queue_arn(queue_url)
        create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON,
            func_name=lambda_name,
            libs=TEST_LAMBDA_LIBS,
            runtime=LAMBDA_RUNTIME_PYTHON36,
            DeadLetterConfig={"TargetArn": queue_arn},
        )
        # invoke Lambda, triggering an error
        payload = {lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1}
        lambda_client.invoke(
            FunctionName=lambda_name,
            Payload=json.dumps(payload),
            InvocationType="Event",
        )

        # assert that message has been received on the DLQ
        def receive_dlq():
            result = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
            assert len(result["Messages"]) > 0
            # DLQ messages carry error metadata as SQS message attributes.
            msg_attrs = result["Messages"][0]["MessageAttributes"]
            assert "RequestID" in msg_attrs
            assert "ErrorCode" in msg_attrs
            assert "ErrorMessage" in msg_attrs

        retry(receive_dlq, retries=10, sleep=2)
        # update DLQ config
        lambda_client.update_function_configuration(FunctionName=lambda_name, DeadLetterConfig={})
        # invoke Lambda again, assert that status code is 200 and error details contained in the payload
        result = lambda_client.invoke(
            FunctionName=lambda_name, Payload=json.dumps(payload), LogType="Tail"
        )
        payload = json.loads(to_str(result["Payload"].read()))
        # Synchronous invoke of a failing handler still returns HTTP 200;
        # the failure is signaled via FunctionError and the error payload.
        assert 200 == result["StatusCode"]
        assert "Unhandled" == result["FunctionError"]
        assert "$LATEST" == result["ExecutedVersion"]
        assert "Test exception" in payload["errorMessage"]
        assert "Exception" in payload["errorType"]
        assert isinstance(payload["stackTrace"], list)
        # LogType="Tail" returns the base64-encoded tail of the execution log.
        log_result = result.get("LogResult")
        assert log_result
        logs = to_str(base64.b64decode(to_str(log_result)))
        assert "START" in logs
        assert "Test exception" in logs
        assert "END" in logs
        assert "REPORT" in logs

    @pytest.mark.parametrize(
        "condition,payload",
        [
            ("Success", {}),
            ("RetriesExhausted", {lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1}),
        ],
    )
    def test_assess_lambda_destination_invocation(
        self,
        condition,
        payload,
        lambda_client,
        sqs_client,
        create_lambda_function,
        sqs_create_queue,
        sqs_queue_arn,
    ):
        """OnSuccess/OnFailure destinations: the destination queue receives a
        record whose requestContext.condition matches the invocation outcome."""
        # create DLQ and Lambda function
        queue_name = f"test-{short_uid()}"
        lambda_name = f"test-{short_uid()}"
        queue_url = sqs_create_queue(QueueName=queue_name)
        queue_arn = sqs_queue_arn(queue_url)
        create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON,
            func_name=lambda_name,
            libs=TEST_LAMBDA_LIBS,
        )
        # Route both success and failure outcomes to the same queue.
        lambda_client.put_function_event_invoke_config(
            FunctionName=lambda_name,
            DestinationConfig={
                "OnSuccess": {"Destination": queue_arn},
                "OnFailure": {"Destination": queue_arn},
            },
        )
        lambda_client.invoke(
            FunctionName=lambda_name,
            Payload=json.dumps(payload),
            InvocationType="Event",
        )

        def receive_message():
            rs = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
            assert len(rs["Messages"]) > 0
            msg = rs["Messages"][0]["Body"]
            msg = json.loads(msg)
            assert condition == msg["requestContext"]["condition"]

        retry(receive_message, retries=10, sleep=3)

    def test_large_payloads(self, caplog, lambda_client, create_lambda_function):
        """A ~5MB JSON payload must round-trip unchanged through an echo function."""
        # Set the loglevel to INFO for this test to avoid breaking a CI environment (due to excessive log outputs)
        caplog.set_level(logging.INFO)
        function_name = f"large_payload-{short_uid()}"
        create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            func_name=function_name,
            runtime=LAMBDA_RUNTIME_PYTHON36,
        )
        payload = {"test": "test123456" * 100 * 1000 * 5}  # 5MB payload
        payload_bytes = to_bytes(json.dumps(payload))
        result = lambda_client.invoke(FunctionName=function_name, Payload=payload_bytes)
        assert 200 == result["ResponseMetadata"]["HTTPStatusCode"]
        result_data = result["Payload"].read()
        result_data = json.loads(to_str(result_data))
        assert payload == result_data
# Reusable decorator: parametrizes a test over every supported Python runtime.
parametrize_python_runtimes = pytest.mark.parametrize("runtime", PYTHON_TEST_RUNTIMES)
class TestPythonRuntimes:
    """Invocation semantics and integrations for the Python runtimes."""

    @pytest.fixture(
        params=PYTHON_TEST_RUNTIMES,
    )
    def python_function_name(self, request, lambda_client, create_lambda_function):
        """Deploy a test function for each Python runtime and yield its name."""
        function_name = f"python-test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON,
            libs=TEST_LAMBDA_LIBS,
            runtime=request.param,
        )
        return function_name

    def test_invocation_type_not_set(self, lambda_client, python_function_name):
        """Default invocation type behaves like RequestResponse (200 + logs)."""
        result = lambda_client.invoke(
            FunctionName=python_function_name, Payload=b"{}", LogType="Tail"
        )
        result_data = json.loads(result["Payload"].read())

        # assert response details
        assert 200 == result["StatusCode"]
        assert {} == result_data["event"]

        # assert that logs are contained in response
        logs = result.get("LogResult", "")
        logs = to_str(base64.b64decode(to_str(logs)))
        assert "START" in logs
        assert "Lambda log message" in logs
        assert "END" in logs
        assert "REPORT" in logs

    def test_invocation_type_request_response(self, lambda_client, python_function_name):
        """Explicit RequestResponse returns 200 and a JSON body."""
        result = lambda_client.invoke(
            FunctionName=python_function_name,
            Payload=b"{}",
            InvocationType="RequestResponse",
        )
        result_data = result["Payload"].read()
        result_data = json.loads(to_str(result_data))
        assert "application/json" == result["ResponseMetadata"]["HTTPHeaders"]["content-type"]
        assert 200 == result["StatusCode"]
        assert isinstance(result_data, dict)

    def test_invocation_type_event(self, lambda_client, python_function_name):
        """Async (Event) invocation returns 202 Accepted."""
        result = lambda_client.invoke(
            FunctionName=python_function_name, Payload=b"{}", InvocationType="Event"
        )
        assert 202 == result["StatusCode"]

    def test_invocation_type_dry_run(self, lambda_client, python_function_name):
        """DryRun invocation returns 204 without executing the function."""
        result = lambda_client.invoke(
            FunctionName=python_function_name, Payload=b"{}", InvocationType="DryRun"
        )
        assert 204 == result["StatusCode"]

    @parametrize_python_runtimes
    def test_lambda_environment(self, lambda_client, create_lambda_function, runtime):
        """Env vars set at creation are visible to the handler and in the config."""
        function_name = f"env-test-function-{short_uid()}"
        env_vars = {"Hello": "World"}
        create_lambda_function(
            handler_file=TEST_LAMBDA_ENV,
            libs=TEST_LAMBDA_LIBS,
            func_name=function_name,
            envvars=env_vars,
            runtime=runtime,
        )

        # invoke function and assert result contains env vars
        result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
        result_data = result["Payload"]
        assert 200 == result["StatusCode"]
        assert json.load(result_data) == env_vars

        # get function config and assert result contains env vars
        result = lambda_client.get_function_configuration(FunctionName=function_name)
        assert result["Environment"] == {"Variables": env_vars}

    @parametrize_python_runtimes
    def test_invocation_with_qualifier(
        self, lambda_client, s3_client, s3_bucket, runtime, check_lambda_logs
    ):
        """Publishing a version and invoking with that Qualifier executes it,
        and the handler context reflects the published version."""
        function_name = f"test_lambda_{short_uid()}"
        bucket_key = "test_lambda.zip"

        # upload zip file to S3
        zip_file = create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=runtime
        )
        s3_client.upload_fileobj(BytesIO(zip_file), s3_bucket, bucket_key)

        # create lambda function
        response = lambda_client.create_function(
            FunctionName=function_name,
            Runtime=runtime,
            Role="r1",  # TODO
            Publish=True,
            Handler="handler.handler",
            Code={"S3Bucket": s3_bucket, "S3Key": bucket_key},
            Timeout=10,
        )
        assert "Version" in response

        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = lambda_client.invoke(
            FunctionName=function_name, Payload=data_before, Qualifier=response["Version"]
        )
        data_after = json.loads(result["Payload"].read())
        assert json.loads(to_str(data_before)) == data_after["event"]

        # The handler echoes back its execution context for inspection.
        context = data_after["context"]
        assert response["Version"] == context["function_version"]
        assert context.get("aws_request_id")
        assert function_name == context["function_name"]
        assert "/aws/lambda/%s" % function_name == context["log_group_name"]
        assert context.get("log_stream_name")
        assert context.get("memory_limit_in_mb")

        # assert that logs are present
        expected = ["Lambda log message - print function"]
        if use_docker():
            # Note that during regular test execution, nosetests captures the output from
            # the logging module - hence we can only expect this when running in Docker
            expected.append(".*Lambda log message - logging module")
        check_lambda_logs(function_name, expected_lines=expected)
        lambda_client.delete_function(FunctionName=function_name)

    @parametrize_python_runtimes
    def test_upload_lambda_from_s3(self, lambda_client, s3_client, s3_bucket, runtime):
        """A function whose code lives in S3 deploys and invokes correctly."""
        function_name = f"test_lambda_{short_uid()}"
        bucket_key = "test_lambda.zip"

        # upload zip file to S3
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=runtime
        )
        s3_client.upload_fileobj(BytesIO(zip_file), s3_bucket, bucket_key)

        # create lambda function
        lambda_client.create_function(
            FunctionName=function_name,
            Runtime=runtime,
            Handler="handler.handler",
            Role="r1",
            Code={"S3Bucket": s3_bucket, "S3Key": bucket_key},
            Timeout=10,
        )

        # invoke lambda function
        data_before = b'{"foo": "bar with \'quotes\\""}'
        result = lambda_client.invoke(FunctionName=function_name, Payload=data_before)
        data_after = json.loads(result["Payload"].read())
        assert json.loads(to_str(data_before)) == data_after["event"]

        context = data_after["context"]
        assert "$LATEST" == context["function_version"]
        assert function_name == context["function_name"]

        # clean up
        lambda_client.delete_function(FunctionName=function_name)

    @parametrize_python_runtimes
    def test_handler_in_submodule(self, lambda_client, create_lambda_function, runtime):
        """Handlers addressed via dotted submodule paths are resolved."""
        function_name = f"test-function-{short_uid()}"
        zip_file = testutil.create_lambda_archive(
            load_file(TEST_LAMBDA_PYTHON),
            get_content=True,
            libs=TEST_LAMBDA_LIBS,
            runtime=runtime,
            file_name="localstack_package/def/main.py",
        )
        create_lambda_function(
            func_name=function_name,
            zip_file=zip_file,
            handler="localstack_package.def.main.handler",
            runtime=runtime,
        )

        # invoke function and assert result
        result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
        result_data = json.loads(result["Payload"].read())
        assert 200 == result["StatusCode"]
        assert json.loads("{}") == result_data["event"]

    @parametrize_python_runtimes
    def test_lambda_send_message_to_sqs(
        self, lambda_client, create_lambda_function, sqs_client, sqs_create_queue, runtime
    ):
        """A handler can publish to SQS; the message arrives on the queue."""
        function_name = f"test-function-{short_uid()}"
        queue_name = f"lambda-queue-{short_uid()}"
        queue_url = sqs_create_queue(QueueName=queue_name)

        create_lambda_function(
            handler_file=TEST_LAMBDA_SEND_MESSAGE_FILE,
            func_name=function_name,
            runtime=runtime,
        )

        event = {
            "message": f"message-from-test-lambda-{short_uid()}",
            "queue_name": queue_name,
            "region_name": config.DEFAULT_REGION,
        }

        lambda_client.invoke(FunctionName=function_name, Payload=json.dumps(event))

        # assert that message has been received on the Queue
        def receive_message():
            rs = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=["All"])
            assert len(rs["Messages"]) > 0
            return rs["Messages"][0]

        message = retry(receive_message, retries=3, sleep=2)
        assert event["message"] == message["Body"]

    @parametrize_python_runtimes
    def test_lambda_put_item_to_dynamodb(
        self, lambda_client, create_lambda_function, dynamodb_create_table, runtime
    ):
        """A handler can write items to DynamoDB; a scan finds all of them."""
        table_name = f"ddb-table-{short_uid()}"
        function_name = f"test-function-{short_uid()}"

        dynamodb_create_table(table_name=table_name, partition_key="id")

        create_lambda_function(
            handler_file=TEST_LAMBDA_PUT_ITEM_FILE,
            func_name=function_name,
            runtime=runtime,
        )

        data = {short_uid(): f"data-{i}" for i in range(3)}

        event = {
            "table_name": table_name,
            "region_name": config.DEFAULT_REGION,
            "items": [{"id": k, "data": v} for k, v in data.items()],
        }

        lambda_client.invoke(FunctionName=function_name, Payload=json.dumps(event))

        dynamodb = aws_stack.connect_to_resource("dynamodb")  # TODO convert to fixture
        rs = dynamodb.Table(table_name).scan()
        items = rs["Items"]

        assert len(items) == len(data.keys())
        for item in items:
            assert data[item["id"]] == item["data"]

    @parametrize_python_runtimes
    def test_lambda_start_stepfunctions_execution(
        self, lambda_client, stepfunctions_client, create_lambda_function, runtime
    ):
        """A handler can start a Step Functions execution targeting another Lambda."""
        function_name = f"test-function-{short_uid()}"
        resource_lambda_name = f"test-resource-{short_uid()}"
        state_machine_name = f"state-machine-{short_uid()}"

        # Starter function: kicks off the state machine execution.
        create_lambda_function(
            handler_file=TEST_LAMBDA_START_EXECUTION_FILE,
            func_name=function_name,
            runtime=runtime,
        )

        # Target function: referenced as the state machine's Task resource.
        create_lambda_function(
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            func_name=resource_lambda_name,
            runtime=runtime,
        )

        state_machine_def = {
            "StartAt": "step1",
            "States": {
                "step1": {
                    "Type": "Task",
                    "Resource": aws_stack.lambda_function_arn(resource_lambda_name),
                    "ResultPath": "$.result_value",
                    "End": True,
                }
            },
        }

        rs = stepfunctions_client.create_state_machine(
            name=state_machine_name,
            definition=json.dumps(state_machine_def),
            roleArn=aws_stack.role_arn("sfn_role"),
        )
        sm_arn = rs["stateMachineArn"]

        try:
            lambda_client.invoke(
                FunctionName=function_name,
                Payload=json.dumps(
                    {
                        "state_machine_arn": sm_arn,
                        "region_name": config.DEFAULT_REGION,
                        "input": {},
                    }
                ),
            )
            time.sleep(1)

            rs = stepfunctions_client.list_executions(stateMachineArn=sm_arn)

            # assert that state machine get executed 1 time
            assert 1 == len([ex for ex in rs["executions"] if ex["stateMachineArn"] == sm_arn])
        finally:
            # clean up
            stepfunctions_client.delete_state_machine(stateMachineArn=sm_arn)

    @pytest.mark.skipif(
        not use_docker(), reason="Test for docker python runtimes not applicable if run locally"
    )
    @parametrize_python_runtimes
    def test_python_runtime_correct_versions(self, lambda_client, create_lambda_function, runtime):
        """Each runtime container reports the runtime version it was created with."""
        function_name = f"test_python_executor_{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON_VERSION,
            runtime=runtime,
        )
        result = lambda_client.invoke(
            FunctionName=function_name,
            Payload=b"{}",
        )
        result = json.loads(to_str(result["Payload"].read()))
        assert result["version"] == runtime
# Reusable decorator: parametrizes a test over every supported Node.js runtime.
parametrize_node_runtimes = pytest.mark.parametrize("runtime", NODE_TEST_RUNTIMES)
class TestNodeJSRuntimes:
    """Invocation tests for the Node.js runtimes."""

    @pytest.mark.skipif(
        not use_docker(), reason="Test for docker nodejs runtimes not applicable if run locally"
    )
    @parametrize_node_runtimes
    def test_nodejs_lambda_with_context(
        self, lambda_client, create_lambda_function, runtime, check_lambda_logs
    ):
        """ClientContext passed on invoke is exposed to the Node.js handler."""
        function_name = f"test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_INTEGRATION_NODEJS,
            handler="lambda_integration.handler",
            runtime=runtime,
        )

        ctx = {
            "custom": {"foo": "bar"},
            "client": {"snap": ["crackle", "pop"]},
            "env": {"fizz": "buzz"},
        }
        # ClientContext must be sent base64-encoded over the wire.
        result = lambda_client.invoke(
            FunctionName=function_name,
            Payload=b"{}",
            ClientContext=to_str(base64.b64encode(to_bytes(json.dumps(ctx)))),
        )
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        # The handler serializes clientContext as a JSON string inside the result,
        # hence the nested json.loads calls here.
        assert "bar" == json.loads(json.loads(result_data)["context"]["clientContext"]).get(
            "custom"
        ).get("foo")

        # assert that logs are present
        expected = [".*Node.js Lambda handler executing."]
        check_lambda_logs(function_name, expected_lines=expected)

    @parametrize_node_runtimes
    def test_invoke_nodejs_lambda(self, lambda_client, create_lambda_function, runtime):
        """A basic Node.js handler invocation returns its response body and logs."""
        function_name = f"test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            zip_file=testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True),
            runtime=runtime,
            handler="lambda_handler.handler",
        )

        rs = lambda_client.invoke(
            FunctionName=function_name,
            Payload=json.dumps({"event_type": "test_lambda"}),
        )
        assert 200 == rs["ResponseMetadata"]["HTTPStatusCode"]

        payload = rs["Payload"].read()
        response = json.loads(to_str(payload))
        assert "response from localstack lambda" in response["body"]

        events = get_lambda_log_events(function_name)
        assert len(events) > 0

    @parametrize_node_runtimes
    def test_invoke_nodejs_lambda_with_payload_containing_quotes(
        self, lambda_client, create_lambda_function, runtime
    ):
        """Payloads containing single quotes survive the invoke path unescaped."""
        function_name = "test_lambda_%s" % short_uid()
        create_lambda_function(
            func_name=function_name,
            zip_file=testutil.create_zip_file(TEST_LAMBDA_NODEJS, get_content=True),
            runtime=runtime,
            handler="lambda_handler.handler",
        )

        test_string = "test_string' with some quotes"
        body = '{"test_var": "%s"}' % test_string
        rs = lambda_client.invoke(
            FunctionName=function_name,
            Payload=body,
        )

        assert 200 == rs["ResponseMetadata"]["HTTPStatusCode"]

        payload = rs["Payload"].read()
        response = json.loads(to_str(payload))
        assert "response from localstack lambda" in response["body"]

        events = get_lambda_log_events(function_name)
        assert len(events) > 0
        # The quoted string must appear verbatim in the handler's log output.
        assert test_string in str(events[0])
class TestCustomRuntimes:
    """Invocation tests for custom (provided.*) runtimes."""

    @pytest.mark.skipif(
        not use_docker(), reason="Test for docker provided runtimes not applicable if run locally"
    )
    @pytest.mark.parametrize(
        "runtime",
        PROVIDED_TEST_RUNTIMES,
    )
    def test_provided_runtimes(
        self, lambda_client, create_lambda_function, runtime, check_lambda_logs
    ):
        """A custom-runtime bootstrap echoes the request and logs a marker line."""
        function_name = f"test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_CUSTOM_RUNTIME,
            handler="function.handler",
            runtime=runtime,
        )
        result = lambda_client.invoke(
            FunctionName=function_name,
            Payload=b'{"text": "bar with \'quotes\\""}',
        )
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        result_data = to_str(result_data).strip()
        # jsonify in pro (re-)formats the event json so we allow both versions here
        assert result_data in (
            """Echoing request: '{"text": "bar with \'quotes\\""}'""",
            """Echoing request: '{"text":"bar with \'quotes\\""}'""",
        )

        # assert that logs are present
        expected = [".*Custom Runtime Lambda handler executing."]
        check_lambda_logs(function_name, expected_lines=expected)
class TestDotNetCoreRuntimes:
    """Invocation tests for .NET Core runtimes (Docker executor only)."""

    @pytest.mark.skipif(
        not use_docker(), reason="Dotnet functions only supported with docker executor"
    )
    @pytest.mark.parametrize(
        "zip_file,handler,runtime,expected_lines",
        [
            (
                TEST_LAMBDA_DOTNETCORE2,
                "DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler",
                LAMBDA_RUNTIME_DOTNETCORE2,
                ["Running .NET Core 2.0 Lambda"],
            ),
            (
                TEST_LAMBDA_DOTNETCORE31,
                "dotnetcore31::dotnetcore31.Function::FunctionHandler",
                LAMBDA_RUNTIME_DOTNETCORE31,
                ["Running .NET Core 3.1 Lambda"],
            ),
        ],
    )
    def test_dotnet_lambda(
        self, zip_file, handler, runtime, expected_lines, lambda_client, create_lambda_function
    ):
        """A .NET Core handler deploys from a prebuilt zip and echoes an empty event."""
        function_name = f"test-function-{short_uid()}"

        create_lambda_function(
            func_name=function_name,
            zip_file=load_file(zip_file, mode="rb"),
            handler=handler,
            runtime=runtime,
        )
        result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert "{}" == to_str(result_data).strip()
        # TODO make lambda log checks more resilient to various formats
        # self.check_lambda_logs(func_name, expected_lines=expected_lines)
class TestRubyRuntimes:
    """Invocation tests for the Ruby runtime (Docker executor only)."""

    @pytest.mark.skipif(not use_docker(), reason="ruby runtimes not supported in local invocation")
    def test_ruby_lambda_running_in_docker(self, lambda_client, create_lambda_function):
        """A Ruby handler deploys and echoes an empty event."""
        function_name = f"test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_RUBY,
            handler="lambda_integration.handler",
            runtime=LAMBDA_RUNTIME_RUBY27,
        )
        result = lambda_client.invoke(FunctionName=function_name, Payload=b"{}")
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert "{}" == to_str(result_data).strip()
class TestGolangRuntimes:
    """Invocation tests for the Go runtime (currently skipped)."""

    @pytest.mark.skip
    def test_golang_lambda(self, lambda_client, tmp_path, create_lambda_function):
        """Download a platform-specific Go handler binary, deploy, and invoke it."""
        # fetch platform-specific example handler
        url = TEST_GOLANG_LAMBDA_URL_TEMPLATE.format(
            version=GO_RUNTIME_VERSION,
            os=get_os(),
            arch=get_arch(),
        )
        handler = tmp_path / "go-handler"
        download_and_extract(url, handler)

        # create function
        func_name = f"test_lambda_{short_uid()}"
        create_lambda_function(
            func_name=func_name,
            handler_file=handler,
            handler="handler",
            runtime=LAMBDA_RUNTIME_GOLANG,
        )

        # invoke
        result = lambda_client.invoke(
            FunctionName=func_name, Payload=json.dumps({"name": "pytest"})
        )
        result_data = result["Payload"].read()
        assert result["StatusCode"] == 200
        assert to_str(result_data).strip() == '"Hello pytest!"'
# Reusable decorator: parametrizes a test over every supported Java runtime.
parametrize_java_runtimes = pytest.mark.parametrize("runtime", JAVA_TEST_RUNTIMES)
class TestJavaRuntimes:
    """Invocation tests for the Java runtimes, including event-source payloads
    (SNS, DynamoDB, Kinesis) and handler-resolution variants."""

    @pytest.fixture(scope="class")
    def test_java_jar(self) -> bytes:
        """Load the prebuilt Java test jar; fail fast with a helpful message."""
        # The TEST_LAMBDA_JAVA jar file is downloaded with `make init-testlibs`.
        java_file = load_file(TEST_LAMBDA_JAVA, mode="rb")
        if not java_file:
            raise Exception(
                f"Test dependency {TEST_LAMBDA_JAVA} not found."
                "Please make sure to run 'make init-testlibs' to ensure the file is available."
            )
        return java_file

    @pytest.fixture(scope="class")
    def test_java_zip(self, tmpdir_factory, test_java_jar) -> bytes:
        """Package the test jar plus the localstack executor jar into a
        deployable zip (jars under lib/)."""
        tmpdir = tmpdir_factory.mktemp("tmp-java-zip")
        zip_lib_dir = os.path.join(tmpdir, "lib")
        zip_jar_path = os.path.join(zip_lib_dir, "test.lambda.jar")
        mkdir(zip_lib_dir)
        cp_r(
            INSTALL_PATH_LOCALSTACK_FAT_JAR,
            os.path.join(zip_lib_dir, "executor.lambda.jar"),
        )
        save_file(zip_jar_path, test_java_jar)
        return testutil.create_zip_file(tmpdir, get_content=True)

    @pytest.fixture(
        params=JAVA_TEST_RUNTIMES,
    )
    def simple_java_lambda(self, create_lambda_function, test_java_zip, request):
        """Deploy the sample LambdaHandler for each Java runtime; yields the name."""
        function_name = f"java-test-function-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            zip_file=test_java_zip,
            runtime=request.param,
            handler="cloud.localstack.sample.LambdaHandler",
        )
        return function_name

    def test_java_runtime(self, lambda_client, simple_java_lambda):
        """Basic synchronous invoke of the Java sample handler."""
        result = lambda_client.invoke(
            FunctionName=simple_java_lambda,
            Payload=b'{"echo":"echo"}',
        )
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        # TODO: find out why the assertion below does not work in Travis-CI! (seems to work locally)
        assert "LinkedHashMap" in to_str(result_data)
        assert result_data is not None

    def test_java_runtime_with_large_payload(self, lambda_client, simple_java_lambda, caplog):
        """A ~5MB payload is accepted by the Java handler."""
        # Set the loglevel to INFO for this test to avoid breaking a CI environment (due to excessive log outputs)
        caplog.set_level(logging.INFO)

        payload = {"test": "test123456" * 100 * 1000 * 5}  # 5MB payload
        payload = to_bytes(json.dumps(payload))

        result = lambda_client.invoke(FunctionName=simple_java_lambda, Payload=payload)
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert "LinkedHashMap" in to_str(result_data)
        assert result_data is not None

    def test_java_runtime_with_lib(self, lambda_client, create_lambda_function):
        """Deploy the same handler from three packaging layouts (plain jar,
        hand-built zip with lib/, gradle-built zip) and verify each works."""
        java_jar_with_lib = load_file(TEST_LAMBDA_JAVA_WITH_LIB, mode="rb")

        # create ZIP file from JAR file
        jar_dir = new_tmp_dir()
        zip_dir = new_tmp_dir()
        unzip(TEST_LAMBDA_JAVA_WITH_LIB, jar_dir)
        zip_lib_dir = os.path.join(zip_dir, "lib")
        shutil.move(os.path.join(jar_dir, "lib"), zip_lib_dir)
        jar_without_libs_file = testutil.create_zip_file(jar_dir)
        shutil.copy(jar_without_libs_file, os.path.join(zip_lib_dir, "lambda.jar"))
        java_zip_with_lib = testutil.create_zip_file(zip_dir, get_content=True)

        java_zip_with_lib_gradle = load_file(
            os.path.join(
                THIS_FOLDER,
                "functions",
                "java",
                "lambda_echo",
                "build",
                "distributions",
                "lambda-function-built-by-gradle.zip",
            ),
            mode="rb",
        )

        for archive in [java_jar_with_lib, java_zip_with_lib, java_zip_with_lib_gradle]:
            lambda_name = "test-function-%s" % short_uid()
            create_lambda_function(
                func_name=lambda_name,
                zip_file=archive,
                runtime=LAMBDA_RUNTIME_JAVA11,
                handler="cloud.localstack.sample.LambdaHandlerWithLib",
            )

            result = lambda_client.invoke(FunctionName=lambda_name, Payload=b'{"echo":"echo"}')
            result_data = result["Payload"].read()

            assert 200 == result["StatusCode"]
            assert "echo" in to_str(result_data)

    def test_sns_event(self, lambda_client, simple_java_lambda):
        """Async invoke with an SNS-shaped event is accepted (202)."""
        result = lambda_client.invoke(
            FunctionName=simple_java_lambda,
            InvocationType="Event",
            Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}',
        )

        assert 202 == result["StatusCode"]

    def test_ddb_event(self, lambda_client, simple_java_lambda):
        """Async invoke with a DynamoDB-stream-shaped event is accepted (202)."""
        result = lambda_client.invoke(
            FunctionName=simple_java_lambda,
            InvocationType="Event",
            Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}',
        )

        assert 202 == result["StatusCode"]

    @parametrize_java_runtimes
    def test_kinesis_invocation(
        self, lambda_client, create_lambda_function, test_java_zip, runtime
    ):
        """A Kinesis event payload deserializes into the Java Kinesis handler
        and the base64 record data ("dGVzdA==" -> "test") is echoed back."""
        payload = (
            b'{"Records": [{'
            b'"kinesis": {"data": "dGVzdA==", "partitionKey": "partition"},'
            b'"eventID": "shardId-000000000001:12345678901234567890123456789012345678901234567890",'
            b'"eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/test"}]}'
        )
        # deploy lambda - Java with Kinesis input object
        function_name = f"test-lambda-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            zip_file=test_java_zip,
            runtime=runtime,
            handler="cloud.localstack.awssdkv1.sample.KinesisLambdaHandler",
        )
        result = lambda_client.invoke(FunctionName=function_name, Payload=payload)
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert '"test "' == to_str(result_data).strip()

    def test_kinesis_event(self, lambda_client, simple_java_lambda):
        """Async invoke with a Kinesis event returns 202 and an empty payload."""
        payload = (
            b'{"Records": [{'
            b'"kinesis": {"data": "dGVzdA==", "partitionKey": "partition"},'
            b'"eventID": "shardId-000000000001:12345678901234567890123456789012345678901234567890",'
            b'"eventSourceARN": "arn:aws:kinesis:us-east-1:123456789012:stream/test"}]}'
        )
        result = lambda_client.invoke(
            FunctionName=simple_java_lambda,
            InvocationType="Event",
            Payload=payload,
        )
        result_data = result["Payload"].read()

        assert 202 == result["StatusCode"]
        assert "" == to_str(result_data).strip()

    @parametrize_java_runtimes
    def test_stream_handler(self, lambda_client, create_lambda_function, test_java_jar, runtime):
        """A RequestStreamHandler implementation invokes successfully."""
        function_name = f"test-lambda-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            zip_file=test_java_jar,
            runtime=runtime,
            handler="cloud.localstack.awssdkv1.sample.LambdaStreamHandler",
        )
        result = lambda_client.invoke(
            FunctionName=function_name,
            Payload=b'{"echo":"echo"}',
        )
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert "{}" == to_str(result_data).strip()

    @parametrize_java_runtimes
    def test_serializable_input_object(
        self, lambda_client, create_lambda_function, test_java_zip, runtime
    ):
        """The event JSON deserializes into a typed POJO input parameter."""
        # deploy lambda - Java with serializable input object
        function_name = f"test-lambda-{short_uid()}"
        create_lambda_function(
            func_name=function_name,
            zip_file=test_java_zip,
            runtime=runtime,
            handler="cloud.localstack.awssdkv1.sample.SerializedInputLambdaHandler",
        )
        result = lambda_client.invoke(
            FunctionName=function_name,
            Payload=b'{"bucket": "test_bucket", "key": "test_key"}',
        )
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert json.loads(to_str(result_data)) == {
            "validated": True,
            "bucket": "test_bucket",
            "key": "test_key",
        }

    def test_trigger_java_lambda_through_sns(
        self, lambda_client, s3_client, sns_client, simple_java_lambda, s3_bucket, sns_create_topic
    ):
        """S3 put -> bucket notification -> SNS -> Lambda: the chained trigger
        produces a new 'Records' log entry in the function's logs."""
        topic_name = "topic-%s" % short_uid()
        key = "key-%s" % short_uid()
        function_name = simple_java_lambda

        topic_arn = sns_create_topic(Name=topic_name)["TopicArn"]

        s3_client.put_bucket_notification_configuration(
            Bucket=s3_bucket,
            NotificationConfiguration={
                "TopicConfigurations": [{"TopicArn": topic_arn, "Events": ["s3:ObjectCreated:*"]}]
            },
        )

        sns_client.subscribe(
            TopicArn=topic_arn,
            Protocol="lambda",
            Endpoint=aws_stack.lambda_function_arn(function_name),
        )

        # Snapshot the log count first so we can assert exactly one new event.
        events_before = run_safe(get_lambda_log_events, function_name, regex_filter="Records") or []

        s3_client.put_object(Bucket=s3_bucket, Key=key, Body="something")
        time.sleep(2)

        # We got an event that confirm lambda invoked
        retry(
            function=check_expected_lambda_log_events_length,
            retries=3,
            sleep=1,
            expected_length=len(events_before) + 1,
            function_name=function_name,
            regex_filter="Records",
        )

        # clean up
        s3_client.delete_objects(Bucket=s3_bucket, Delete={"Objects": [{"Key": key}]})

    @pytest.mark.parametrize(
        "handler,expected_result",
        [
            (
                "cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequestCustom",
                "CUSTOM",
            ),
            ("cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom", "INTERFACE"),
            (
                "cloud.localstack.sample.LambdaHandlerWithInterfaceAndCustom::handleRequest",
                "INTERFACE",
            ),
        ],
    )
    # this test is only compiled against java 11
    def test_java_custom_handler_method_specification(
        self, lambda_client, create_lambda_function, handler, expected_result, check_lambda_logs
    ):
        """Handler strings with/without '::method' suffixes resolve to the
        expected method on a class that has both an interface and custom one."""
        java_handler_multiple_handlers = load_file(TEST_LAMBDA_JAVA_MULTIPLE_HANDLERS, mode="rb")
        expected = ['.*"echo": "echo".*']

        function_name = "lambda_handler_test_%s" % short_uid()
        create_lambda_function(
            func_name=function_name,
            zip_file=java_handler_multiple_handlers,
            runtime=LAMBDA_RUNTIME_JAVA11,
            handler=handler,
        )

        result = lambda_client.invoke(FunctionName=function_name, Payload=b'{"echo":"echo"}')
        result_data = result["Payload"].read()

        assert 200 == result["StatusCode"]
        assert expected_result == to_str(result_data).strip('"\n ')
        check_lambda_logs(function_name, expected_lines=expected)
# Handler files used by TestLambdaBehavior below (context caching / timeouts).
TEST_LAMBDA_CACHE_NODEJS = os.path.join(THIS_FOLDER, "functions", "lambda_cache.js")
TEST_LAMBDA_CACHE_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_cache.py")
TEST_LAMBDA_TIMEOUT_PYTHON = os.path.join(THIS_FOLDER, "functions", "lambda_timeout.py")
class TestLambdaBehavior:
@pytest.mark.parametrize(
["lambda_fn", "lambda_runtime"],
[
(
TEST_LAMBDA_CACHE_NODEJS,
LAMBDA_RUNTIME_NODEJS12X,
), # TODO: can we do some kind of nested parametrize here?
(TEST_LAMBDA_CACHE_PYTHON, LAMBDA_RUNTIME_PYTHON38),
],
ids=["nodejs", "python"],
)
@pytest.mark.xfail(
os.environ.get("TEST_TARGET") != "AWS_CLOUD",
reason="lambda caching not supported currently",
) # TODO: should be removed after the lambda rework
def test_lambda_cache_local(
self, lambda_client, create_lambda_function, lambda_fn, lambda_runtime
):
"""tests the local context reuse of packages in AWS lambda"""
func_name = f"test_lambda_{short_uid()}"
create_lambda_function(
func_name=func_name,
handler_file=lambda_fn,
runtime=lambda_runtime,
client=lambda_client,
)
result = lambda_client.invoke(FunctionName=func_name)
result_data = result["Payload"].read()
assert result["StatusCode"] == 200
assert json.loads(result_data)["counter"] == 0
result = lambda_client.invoke(FunctionName=func_name)
result_data = result["Payload"].read()
assert result["StatusCode"] == 200
assert json.loads(result_data)["counter"] == 1
@pytest.mark.parametrize(
["lambda_fn", "lambda_runtime"],
[
(TEST_LAMBDA_TIMEOUT_PYTHON, LAMBDA_RUNTIME_PYTHON38),
],
ids=["python"],
)
@pytest.mark.xfail(
os.environ.get("TEST_TARGET") != "AWS_CLOUD",
reason="lambda timeouts not supported currently",
) # TODO: should be removed after the lambda rework
def test_lambda_timeout_logs(
self, lambda_client, create_lambda_function, lambda_fn, lambda_runtime, logs_client
):
"""tests the local context reuse of packages in AWS lambda"""
func_name = f"test_lambda_{short_uid()}"
create_lambda_function(
func_name=func_name,
handler_file=lambda_fn,
runtime=lambda_runtime,
client=lambda_client,
timeout=1,
)
result = lambda_client.invoke(FunctionName=func_name, Payload=json.dumps({"wait": 2}))
assert result["StatusCode"] == 200
log_group_name = f"/aws/lambda/{func_name}"
ls_result = logs_client.describe_log_streams(logGroupName=log_group_name)
log_stream_name = ls_result["logStreams"][0]["logStreamName"]
log_events = logs_client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream_name
)["events"]
assert any(["starting wait" in e["message"] for e in log_events])
assert not any(["done waiting" in e["message"] for e in log_events])
    @pytest.mark.parametrize(
        ["lambda_fn", "lambda_runtime"],
        [
            (TEST_LAMBDA_TIMEOUT_PYTHON, LAMBDA_RUNTIME_PYTHON38),
        ],
        ids=["python"],
    )
    def test_lambda_no_timeout_logs(
        self, lambda_client, create_lambda_function, lambda_fn, lambda_runtime, logs_client
    ):
        """Checks that an invocation finishing within its configured timeout
        logs both its start marker and its completion marker."""
        func_name = f"test_lambda_{short_uid()}"
        create_lambda_function(
            func_name=func_name,
            handler_file=lambda_fn,
            runtime=lambda_runtime,
            client=lambda_client,
            timeout=2,
        )
        # Timeout is 2s while the handler only waits 1s, so it should complete.
        result = lambda_client.invoke(FunctionName=func_name, Payload=json.dumps({"wait": 1}))
        assert result["StatusCode"] == 200
        log_group_name = f"/aws/lambda/{func_name}"
        # Log delivery is asynchronous: retry until the stream shows up.
        def _log_stream_available():
            result = logs_client.describe_log_streams(logGroupName=log_group_name)["logStreams"]
            return len(result) > 0
        wait_until(_log_stream_available, strategy="linear")
        ls_result = logs_client.describe_log_streams(logGroupName=log_group_name)
        log_stream_name = ls_result["logStreams"][0]["logStreamName"]
        # Retry until both expected handler messages have been delivered.
        def _assert_log_output():
            log_events = logs_client.get_log_events(
                logGroupName=log_group_name, logStreamName=log_stream_name
            )["events"]
            return any(["starting wait" in e["message"] for e in log_events]) and any(
                ["done waiting" in e["message"] for e in log_events]
            )
        wait_until(_assert_log_output, strategy="linear")
| []
| []
| [
"TEST_TARGET"
]
| [] | ["TEST_TARGET"] | python | 1 | 0 | |
clients/google-api-services-displayvideo/v1/1.31.0/com/google/api/services/displayvideo/v1/DisplayVideo.java | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.displayvideo.v1;
/**
* Service definition for DisplayVideo (v1).
*
* <p>
* Display & Video 360 API allows users to manage and create campaigns and reports.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/display-video/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link DisplayVideoRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class DisplayVideo extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
  // Note: Leave this static initializer at the top of the file.
  // Fails fast at class-load time if the google-api-client on the classpath is
  // older than 1.31.1, the minimum this generated library requires.
  static {
    com.google.api.client.util.Preconditions.checkState(
        com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
        (com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
        (com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
         com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
        "You are currently running with version %s of google-api-client. " +
        "You need at least version 1.31.1 of google-api-client to run version " +
        "1.31.0 of the Display & Video 360 API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
  }
  /**
   * The default encoded root URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_ROOT_URL = "https://displayvideo.googleapis.com/";
  /**
   * The default encoded mTLS root URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   *
   * @since 1.31
   */
  public static final String DEFAULT_MTLS_ROOT_URL = "https://displayvideo.mtls.googleapis.com/";
  /**
   * The default encoded service path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.7
   */
  public static final String DEFAULT_SERVICE_PATH = "";
  /**
   * The default encoded batch path of the service. This is determined when the library is
   * generated and normally should not be changed.
   *
   * @since 1.23
   */
  public static final String DEFAULT_BATCH_PATH = "batch";
  /**
   * The default encoded base URL of the service. This is determined when the library is generated
   * and normally should not be changed.
   */
  // Root URL + service path; the service path is empty for this API, so this equals DEFAULT_ROOT_URL.
  public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
  /**
   * Constructor.
   *
   * <p>
   * Use {@link Builder} if you need to specify any of the optional parameters.
   * </p>
   *
   * @param transport HTTP transport, which should normally be:
   *        <ul>
   *        <li>Google App Engine:
   *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
   *        <li>Android: {@code newCompatibleTransport} from
   *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
   *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
   *        </li>
   *        </ul>
   * @param jsonFactory JSON factory, which may be:
   *        <ul>
   *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
   *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
   *        <li>Android Honeycomb or higher:
   *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
   *        </ul>
   * @param httpRequestInitializer HTTP request initializer or {@code null} for none
   * @since 1.7
   */
  public DisplayVideo(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
      com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
    // Convenience constructor: wraps the three required dependencies in a Builder.
    this(new Builder(transport, jsonFactory, httpRequestInitializer));
  }
  /**
   * Package-private builder-based constructor; all configuration comes from {@code builder}.
   *
   * @param builder builder
   */
  DisplayVideo(Builder builder) {
    super(builder);
  }
  @Override
  protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
    // Called for every request created by this client before it is executed;
    // currently just delegates to the superclass implementation.
    super.initialize(httpClientRequest);
  }
  /**
   * An accessor for creating requests from the Advertisers collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
   *   {@code DisplayVideo.Advertisers.List request = displayvideo.advertisers().list(parameters ...)}
   * </pre>
   *
   * @return the resource collection
   */
  public Advertisers advertisers() {
    // Returns a fresh accessor on every call; the accessor itself carries no request state.
    return new Advertisers();
  }
/**
* The "advertisers" collection of methods.
*/
public class Advertisers {
    /**
     * Audits an advertiser. Returns the counts of used entities per resource type under the advertiser
     * provided. Used entities count towards their respective resource limit. See
     * https://support.google.com/displayvideo/answer/6071450.
     *
     * Create a request for the method "advertisers.audit".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Audit#execute()} method to invoke the remote operation.
     *
     * @param advertiserId Required. The ID of the advertiser to audit.
     * @return the request
     */
    public Audit audit(java.lang.Long advertiserId) throws java.io.IOException {
      Audit result = new Audit(advertiserId);
      initialize(result);
      return result;
    }
    public class Audit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AuditAdvertiserResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}:audit";
      // Validation pattern for the {+advertiserId} path parameter; not referenced in the
      // code visible here — presumably consulted by generated path validation elsewhere.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Audits an advertiser. Returns the counts of used entities per resource type under the
       * advertiser provided. Used entities count towards their respective resource limit. See
       * https://support.google.com/displayvideo/answer/6071450.
       *
       * Create a request for the method "advertisers.audit".
       *
       * This request holds the parameters needed by the the displayvideo server. After setting any
       * optional parameters, call the {@link Audit#execute()} method to invoke the remote operation.
       * <p> {@link
       * Audit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser to audit.
       * @since 1.13
       */
      protected Audit(java.lang.Long advertiserId) {
        // GET request with no request body (null content).
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.AuditAdvertiserResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // The following overrides only narrow the return type so that standard-parameter
      // setters can be chained fluently on an Audit request.
      @Override
      public Audit set$Xgafv(java.lang.String $Xgafv) {
        return (Audit) super.set$Xgafv($Xgafv);
      }
      @Override
      public Audit setAccessToken(java.lang.String accessToken) {
        return (Audit) super.setAccessToken(accessToken);
      }
      @Override
      public Audit setAlt(java.lang.String alt) {
        return (Audit) super.setAlt(alt);
      }
      @Override
      public Audit setCallback(java.lang.String callback) {
        return (Audit) super.setCallback(callback);
      }
      @Override
      public Audit setFields(java.lang.String fields) {
        return (Audit) super.setFields(fields);
      }
      @Override
      public Audit setKey(java.lang.String key) {
        return (Audit) super.setKey(key);
      }
      @Override
      public Audit setOauthToken(java.lang.String oauthToken) {
        return (Audit) super.setOauthToken(oauthToken);
      }
      @Override
      public Audit setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Audit) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Audit setQuotaUser(java.lang.String quotaUser) {
        return (Audit) super.setQuotaUser(quotaUser);
      }
      @Override
      public Audit setUploadType(java.lang.String uploadType) {
        return (Audit) super.setUploadType(uploadType);
      }
      @Override
      public Audit setUploadProtocol(java.lang.String uploadProtocol) {
        return (Audit) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser to audit. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser to audit.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser to audit. */
      public Audit setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Optional. The specific fields to return. If no mask is specified, all fields in the
       * response proto will be filled. Valid values are: * usedLineItemsCount *
       * usedInsertionOrdersCount * usedCampaignsCount * channelsCount *
       * negativelyTargetedChannelsCount * negativeKeywordListsCount * adGroupCriteriaCount *
       * campaignCriteriaCount
       */
      @com.google.api.client.util.Key
      private String readMask;
      /** Optional. The specific fields to return. If no mask is specified, all fields in the response proto
     will be filled. Valid values are: * usedLineItemsCount * usedInsertionOrdersCount *
     usedCampaignsCount * channelsCount * negativelyTargetedChannelsCount * negativeKeywordListsCount *
     adGroupCriteriaCount * campaignCriteriaCount
       */
      public String getReadMask() {
        return readMask;
      }
      /**
       * Optional. The specific fields to return. If no mask is specified, all fields in the
       * response proto will be filled. Valid values are: * usedLineItemsCount *
       * usedInsertionOrdersCount * usedCampaignsCount * channelsCount *
       * negativelyTargetedChannelsCount * negativeKeywordListsCount * adGroupCriteriaCount *
       * campaignCriteriaCount
       */
      public Audit setReadMask(String readMask) {
        this.readMask = readMask;
        return this;
      }
      @Override
      public Audit set(String parameterName, Object value) {
        return (Audit) super.set(parameterName, value);
      }
    }
    /**
     * Bulk edits targeting options under a single advertiser. The operation will delete the assigned
     * targeting options provided in BulkEditAdvertiserAssignedTargetingOptionsRequest.delete_requests
     * and then create the assigned targeting options provided in
     * BulkEditAdvertiserAssignedTargetingOptionsRequest.create_requests .
     *
     * Create a request for the method "advertisers.bulkEditAdvertiserAssignedTargetingOptions".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link BulkEditAdvertiserAssignedTargetingOptions#execute()} method to
     * invoke the remote operation.
     *
     * @param advertiserId Required. The ID of the advertiser.
     * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsRequest}
     * @return the request
     */
    public BulkEditAdvertiserAssignedTargetingOptions bulkEditAdvertiserAssignedTargetingOptions(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsRequest content) throws java.io.IOException {
      BulkEditAdvertiserAssignedTargetingOptions result = new BulkEditAdvertiserAssignedTargetingOptions(advertiserId, content);
      initialize(result);
      return result;
    }
    public class BulkEditAdvertiserAssignedTargetingOptions extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}:bulkEditAdvertiserAssignedTargetingOptions";
      // Validation pattern for the {+advertiserId} path parameter; not referenced in the
      // code visible here — presumably consulted by generated path validation elsewhere.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Bulk edits targeting options under a single advertiser. The operation will delete the assigned
       * targeting options provided in BulkEditAdvertiserAssignedTargetingOptionsRequest.delete_requests
       * and then create the assigned targeting options provided in
       * BulkEditAdvertiserAssignedTargetingOptionsRequest.create_requests .
       *
       * Create a request for the method "advertisers.bulkEditAdvertiserAssignedTargetingOptions".
       *
       * This request holds the parameters needed by the the displayvideo server. After setting any
       * optional parameters, call the {@link BulkEditAdvertiserAssignedTargetingOptions#execute()}
       * method to invoke the remote operation. <p> {@link BulkEditAdvertiserAssignedTargetingOptions#in
       * itialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be called
       * to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsRequest}
       * @since 1.13
       */
      protected BulkEditAdvertiserAssignedTargetingOptions(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsRequest content) {
        // POST request; {@code content} is serialized as the request body.
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditAdvertiserAssignedTargetingOptionsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      // The following overrides only narrow the return type for fluent chaining.
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions set$Xgafv(java.lang.String $Xgafv) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.set$Xgafv($Xgafv);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setAccessToken(java.lang.String accessToken) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setAccessToken(accessToken);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setAlt(java.lang.String alt) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setAlt(alt);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setCallback(java.lang.String callback) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setCallback(callback);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setFields(java.lang.String fields) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setFields(fields);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setKey(java.lang.String key) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setKey(key);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setOauthToken(java.lang.String oauthToken) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setOauthToken(oauthToken);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setQuotaUser(java.lang.String quotaUser) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setQuotaUser(quotaUser);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setUploadType(java.lang.String uploadType) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setUploadType(uploadType);
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions setUploadProtocol(java.lang.String uploadProtocol) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser. */
      public BulkEditAdvertiserAssignedTargetingOptions setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      @Override
      public BulkEditAdvertiserAssignedTargetingOptions set(String parameterName, Object value) {
        return (BulkEditAdvertiserAssignedTargetingOptions) super.set(parameterName, value);
      }
    }
    /**
     * Lists assigned targeting options of an advertiser across targeting types.
     *
     * Create a request for the method "advertisers.bulkListAdvertiserAssignedTargetingOptions".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link BulkListAdvertiserAssignedTargetingOptions#execute()} method to
     * invoke the remote operation.
     *
     * @param advertiserId Required. The ID of the advertiser the line item belongs to.
     * @return the request
     */
    public BulkListAdvertiserAssignedTargetingOptions bulkListAdvertiserAssignedTargetingOptions(java.lang.Long advertiserId) throws java.io.IOException {
      BulkListAdvertiserAssignedTargetingOptions result = new BulkListAdvertiserAssignedTargetingOptions(advertiserId);
      initialize(result);
      return result;
    }
    public class BulkListAdvertiserAssignedTargetingOptions extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkListAdvertiserAssignedTargetingOptionsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}:bulkListAdvertiserAssignedTargetingOptions";
      // Validation pattern for the {+advertiserId} path parameter; not referenced in the
      // code visible here — presumably consulted by generated path validation elsewhere.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists assigned targeting options of an advertiser across targeting types.
       *
       * Create a request for the method "advertisers.bulkListAdvertiserAssignedTargetingOptions".
       *
       * This request holds the parameters needed by the the displayvideo server. After setting any
       * optional parameters, call the {@link BulkListAdvertiserAssignedTargetingOptions#execute()}
       * method to invoke the remote operation. <p> {@link BulkListAdvertiserAssignedTargetingOptions#in
       * itialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be called
       * to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser the line item belongs to.
       * @since 1.13
       */
      protected BulkListAdvertiserAssignedTargetingOptions(java.lang.Long advertiserId) {
        // GET request with no request body (null content).
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.BulkListAdvertiserAssignedTargetingOptionsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // The following overrides only narrow the return type for fluent chaining.
      @Override
      public BulkListAdvertiserAssignedTargetingOptions set$Xgafv(java.lang.String $Xgafv) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.set$Xgafv($Xgafv);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setAccessToken(java.lang.String accessToken) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setAccessToken(accessToken);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setAlt(java.lang.String alt) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setAlt(alt);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setCallback(java.lang.String callback) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setCallback(callback);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setFields(java.lang.String fields) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setFields(fields);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setKey(java.lang.String key) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setKey(key);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setOauthToken(java.lang.String oauthToken) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setOauthToken(oauthToken);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setQuotaUser(java.lang.String quotaUser) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setQuotaUser(quotaUser);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setUploadType(java.lang.String uploadType) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setUploadType(uploadType);
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions setUploadProtocol(java.lang.String uploadProtocol) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser the line item belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser the line item belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser the line item belongs to. */
      public BulkListAdvertiserAssignedTargetingOptions setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
       * expressions are made up of one or more restrictions. * Restrictions can be combined by the
       * logical operator `OR`.. * A restriction has the form of `{field} {operator} {value}`. * The
       * operator must be `EQUALS (=)`. * Supported fields: - `targetingType` Examples: *
       * targetingType with value TARGETING_TYPE_CHANNEL `targetingType="TARGETING_TYPE_CHANNEL"`
       * The length of this field should be no more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /** Allows filtering by assigned targeting option properties. Supported syntax: * Filter expressions
     are made up of one or more restrictions. * Restrictions can be combined by the logical operator
     `OR`.. * A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS
     (=)`. * Supported fields: - `targetingType` Examples: * targetingType with value
     TARGETING_TYPE_CHANNEL `targetingType="TARGETING_TYPE_CHANNEL"` The length of this field should be
     no more than 500 characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
       * expressions are made up of one or more restrictions. * Restrictions can be combined by the
       * logical operator `OR`.. * A restriction has the form of `{field} {operator} {value}`. * The
       * operator must be `EQUALS (=)`. * Supported fields: - `targetingType` Examples: *
       * targetingType with value TARGETING_TYPE_CHANNEL `targetingType="TARGETING_TYPE_CHANNEL"`
       * The length of this field should be no more than 500 characters.
       */
      public BulkListAdvertiserAssignedTargetingOptions setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `targetingType` (default) The
       * default sorting order is ascending. To specify descending order for a field, a suffix
       * "desc" should be added to the field name. Example: `targetingType desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /** Field by which to sort the list. Acceptable values are: * `targetingType` (default) The default
     sorting order is ascending. To specify descending order for a field, a suffix "desc" should be
     added to the field name. Example: `targetingType desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `targetingType` (default) The
       * default sorting order is ascending. To specify descending order for a field, a suffix
       * "desc" should be added to the field name. Example: `targetingType desc`.
       */
      public BulkListAdvertiserAssignedTargetingOptions setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. The size must be an integer between `1` and `5000`. If unspecified,
       * the default is '5000'. Returns error code `INVALID_ARGUMENT` if an invalid value is
       * specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. The size must be an integer between `1` and `5000`. If unspecified, the
     default is '5000'. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. The size must be an integer between `1` and `5000`. If unspecified,
       * the default is '5000'. Returns error code `INVALID_ARGUMENT` if an invalid value is
       * specified.
       */
      public BulkListAdvertiserAssignedTargetingOptions setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token that lets the client fetch the next page of results. Typically, this is the value
       * of next_page_token returned from the previous call to
       * `BulkListAdvertiserAssignedTargetingOptions` method. If not specified, the first page of
       * results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token that lets the client fetch the next page of results. Typically, this is the value of
     next_page_token returned from the previous call to `BulkListAdvertiserAssignedTargetingOptions`
     method. If not specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token that lets the client fetch the next page of results. Typically, this is the value
       * of next_page_token returned from the previous call to
       * `BulkListAdvertiserAssignedTargetingOptions` method. If not specified, the first page of
       * results will be returned.
       */
      public BulkListAdvertiserAssignedTargetingOptions setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      @Override
      public BulkListAdvertiserAssignedTargetingOptions set(String parameterName, Object value) {
        return (BulkListAdvertiserAssignedTargetingOptions) super.set(parameterName, value);
      }
    }
    /**
     * Creates a new advertiser. Returns the newly created advertiser if successful. This method can
     * take up to 180 seconds to complete.
     *
     * Create a request for the method "advertisers.create".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Create#execute()} method to invoke the remote operation.
     *
     * @param content the {@link com.google.api.services.displayvideo.v1.model.Advertiser}
     * @return the request
     */
    public Create create(com.google.api.services.displayvideo.v1.model.Advertiser content) throws java.io.IOException {
      Create result = new Create(content);
      initialize(result);
      return result;
    }
    public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Advertiser> {
      private static final String REST_PATH = "v1/advertisers";
      /**
       * Creates a new advertiser. Returns the newly created advertiser if successful. This method can
       * take up to 180 seconds to complete.
       *
       * Create a request for the method "advertisers.create".
       *
       * This request holds the parameters needed by the the displayvideo server. After setting any
       * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
       * <p> {@link
       * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Advertiser}
       * @since 1.13
       */
      protected Create(com.google.api.services.displayvideo.v1.model.Advertiser content) {
        // POST request; the Advertiser {@code content} is serialized as the request body.
        // No path or query parameters beyond the standard ones.
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Advertiser.class);
      }
      // The following overrides only narrow the return type for fluent chaining.
      @Override
      public Create set$Xgafv(java.lang.String $Xgafv) {
        return (Create) super.set$Xgafv($Xgafv);
      }
      @Override
      public Create setAccessToken(java.lang.String accessToken) {
        return (Create) super.setAccessToken(accessToken);
      }
      @Override
      public Create setAlt(java.lang.String alt) {
        return (Create) super.setAlt(alt);
      }
      @Override
      public Create setCallback(java.lang.String callback) {
        return (Create) super.setCallback(callback);
      }
      @Override
      public Create setFields(java.lang.String fields) {
        return (Create) super.setFields(fields);
      }
      @Override
      public Create setKey(java.lang.String key) {
        return (Create) super.setKey(key);
      }
      @Override
      public Create setOauthToken(java.lang.String oauthToken) {
        return (Create) super.setOauthToken(oauthToken);
      }
      @Override
      public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Create) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Create setQuotaUser(java.lang.String quotaUser) {
        return (Create) super.setQuotaUser(quotaUser);
      }
      @Override
      public Create setUploadType(java.lang.String uploadType) {
        return (Create) super.setUploadType(uploadType);
      }
      @Override
      public Create setUploadProtocol(java.lang.String uploadProtocol) {
        return (Create) super.setUploadProtocol(uploadProtocol);
      }
      @Override
      public Create set(String parameterName, Object value) {
        return (Create) super.set(parameterName, value);
      }
    }
    /**
     * Deletes an advertiser. Deleting an advertiser will delete all of its child resources, for
     * example, campaigns, insertion orders and line items. A deleted advertiser cannot be recovered.
     *
     * Create a request for the method "advertisers.delete".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Delete#execute()} method to invoke the remote operation.
     *
     * @param advertiserId The ID of the advertiser we need to delete.
     * @return the request
     */
    public Delete delete(java.lang.Long advertiserId) throws java.io.IOException {
      // Factory: builds the request and runs the client-level initialize hook on it.
      Delete result = new Delete(advertiserId);
      initialize(result);
      return result;
    }
/** Request type for the "advertisers.delete" method; instances are built via {@code delete(...)}. */
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
  private static final String REST_PATH = "v1/advertisers/{+advertiserId}";
  // NOTE(review): not referenced in the visible code — presumably emitted by the code generator
  // to validate the {+advertiserId} path segment; confirm before removing.
  private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
      java.util.regex.Pattern.compile("^[^/]+$");
  /**
   * Deletes an advertiser. Deleting an advertiser will delete all of its child resources, for
   * example, campaigns, insertion orders and line items. A deleted advertiser cannot be recovered.
   *
   * Create a request for the method "advertisers.delete".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
   * <p> {@link
   * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param advertiserId The ID of the advertiser we need to delete.
   * @since 1.13
   */
  protected Delete(java.lang.Long advertiserId) {
    super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
    this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
  }
  @Override
  public Delete set$Xgafv(java.lang.String $Xgafv) {
    return (Delete) super.set$Xgafv($Xgafv);
  }
  @Override
  public Delete setAccessToken(java.lang.String accessToken) {
    return (Delete) super.setAccessToken(accessToken);
  }
  @Override
  public Delete setAlt(java.lang.String alt) {
    return (Delete) super.setAlt(alt);
  }
  @Override
  public Delete setCallback(java.lang.String callback) {
    return (Delete) super.setCallback(callback);
  }
  @Override
  public Delete setFields(java.lang.String fields) {
    return (Delete) super.setFields(fields);
  }
  @Override
  public Delete setKey(java.lang.String key) {
    return (Delete) super.setKey(key);
  }
  @Override
  public Delete setOauthToken(java.lang.String oauthToken) {
    return (Delete) super.setOauthToken(oauthToken);
  }
  @Override
  public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Delete) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public Delete setQuotaUser(java.lang.String quotaUser) {
    return (Delete) super.setQuotaUser(quotaUser);
  }
  @Override
  public Delete setUploadType(java.lang.String uploadType) {
    return (Delete) super.setUploadType(uploadType);
  }
  @Override
  public Delete setUploadProtocol(java.lang.String uploadProtocol) {
    return (Delete) super.setUploadProtocol(uploadProtocol);
  }
  /** The ID of the advertiser we need to delete. */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;
  /** The ID of the advertiser we need to delete.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }
  /** The ID of the advertiser we need to delete. */
  public Delete setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }
  @Override
  public Delete set(String parameterName, Object value) {
    return (Delete) super.set(parameterName, value);
  }
}
/**
 * Builds a request for the "advertisers.get" method, which fetches a single advertiser.
 *
 * The returned request carries the parameters needed by the displayvideo server. Set any
 * optional parameters, then call {@link Get#execute()} to invoke the remote operation.
 *
 * @param advertiserId Required. The ID of the advertiser to fetch.
 * @return the request
 */
public Get get(java.lang.Long advertiserId) throws java.io.IOException {
  Get request = new Get(advertiserId);
  initialize(request);
  return request;
}
/** Request type for the "advertisers.get" method; instances are built via {@code get(...)}. */
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Advertiser> {
  private static final String REST_PATH = "v1/advertisers/{+advertiserId}";
  // NOTE(review): not referenced in the visible code — presumably emitted by the code generator
  // to validate the {+advertiserId} path segment; confirm before removing.
  private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
      java.util.regex.Pattern.compile("^[^/]+$");
  /**
   * Gets an advertiser.
   *
   * Create a request for the method "advertisers.get".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
   * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
   * must be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param advertiserId Required. The ID of the advertiser to fetch.
   * @since 1.13
   */
  protected Get(java.lang.Long advertiserId) {
    super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Advertiser.class);
    this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
  }
  @Override
  public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
    return super.executeUsingHead();
  }
  @Override
  public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
    return super.buildHttpRequestUsingHead();
  }
  @Override
  public Get set$Xgafv(java.lang.String $Xgafv) {
    return (Get) super.set$Xgafv($Xgafv);
  }
  @Override
  public Get setAccessToken(java.lang.String accessToken) {
    return (Get) super.setAccessToken(accessToken);
  }
  @Override
  public Get setAlt(java.lang.String alt) {
    return (Get) super.setAlt(alt);
  }
  @Override
  public Get setCallback(java.lang.String callback) {
    return (Get) super.setCallback(callback);
  }
  @Override
  public Get setFields(java.lang.String fields) {
    return (Get) super.setFields(fields);
  }
  @Override
  public Get setKey(java.lang.String key) {
    return (Get) super.setKey(key);
  }
  @Override
  public Get setOauthToken(java.lang.String oauthToken) {
    return (Get) super.setOauthToken(oauthToken);
  }
  @Override
  public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Get) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public Get setQuotaUser(java.lang.String quotaUser) {
    return (Get) super.setQuotaUser(quotaUser);
  }
  @Override
  public Get setUploadType(java.lang.String uploadType) {
    return (Get) super.setUploadType(uploadType);
  }
  @Override
  public Get setUploadProtocol(java.lang.String uploadProtocol) {
    return (Get) super.setUploadProtocol(uploadProtocol);
  }
  /** Required. The ID of the advertiser to fetch. */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;
  /** Required. The ID of the advertiser to fetch.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }
  /** Required. The ID of the advertiser to fetch. */
  public Get setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }
  @Override
  public Get set(String parameterName, Object value) {
    return (Get) super.set(parameterName, value);
  }
}
/**
 * Builds a request for the "advertisers.list" method, which lists advertisers accessible to the
 * current user. The order is defined by the order_by parameter. A single partner_id is required;
 * cross-partner listing is not supported.
 *
 * The returned request carries the parameters needed by the displayvideo server. Set any
 * optional parameters, then call {@link List#execute()} to invoke the remote operation.
 *
 * @return the request
 */
public List list() throws java.io.IOException {
  List request = new List();
  initialize(request);
  return request;
}
/** Request type for the "advertisers.list" method; instances are built via {@code list()}. */
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListAdvertisersResponse> {
  private static final String REST_PATH = "v1/advertisers";
  /**
   * Lists advertisers that are accessible to the current user. The order is defined by the order_by
   * parameter. A single partner_id is required. Cross-partner listing is not supported.
   *
   * Create a request for the method "advertisers.list".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
   * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
   * must be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @since 1.13
   */
  protected List() {
    super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListAdvertisersResponse.class);
  }
  @Override
  public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
    return super.executeUsingHead();
  }
  @Override
  public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
    return super.buildHttpRequestUsingHead();
  }
  @Override
  public List set$Xgafv(java.lang.String $Xgafv) {
    return (List) super.set$Xgafv($Xgafv);
  }
  @Override
  public List setAccessToken(java.lang.String accessToken) {
    return (List) super.setAccessToken(accessToken);
  }
  @Override
  public List setAlt(java.lang.String alt) {
    return (List) super.setAlt(alt);
  }
  @Override
  public List setCallback(java.lang.String callback) {
    return (List) super.setCallback(callback);
  }
  @Override
  public List setFields(java.lang.String fields) {
    return (List) super.setFields(fields);
  }
  @Override
  public List setKey(java.lang.String key) {
    return (List) super.setKey(key);
  }
  @Override
  public List setOauthToken(java.lang.String oauthToken) {
    return (List) super.setOauthToken(oauthToken);
  }
  @Override
  public List setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (List) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public List setQuotaUser(java.lang.String quotaUser) {
    return (List) super.setQuotaUser(quotaUser);
  }
  @Override
  public List setUploadType(java.lang.String uploadType) {
    return (List) super.setUploadType(uploadType);
  }
  @Override
  public List setUploadProtocol(java.lang.String uploadProtocol) {
    return (List) super.setUploadProtocol(uploadProtocol);
  }
  /**
   * Allows filtering by advertiser properties. Supported syntax: * Filter expressions are made
   * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
   * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
   * of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
   * `advertiserId` - `displayName` - `entityStatus` Examples: * All active advertisers under a
   * partner: `entityStatus="ENTITY_STATUS_ACTIVE"` The length of this field should be no more
   * than 500 characters.
   */
  @com.google.api.client.util.Key
  private java.lang.String filter;
  /** Allows filtering by advertiser properties. Supported syntax: * Filter expressions are made up of
 one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A
 sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
 {value}`. * The operator must be `EQUALS (=)`. * Supported fields: - `advertiserId` - `displayName`
 - `entityStatus` Examples: * All active advertisers under a partner:
 `entityStatus="ENTITY_STATUS_ACTIVE"` The length of this field should be no more than 500
 characters.
   */
  public java.lang.String getFilter() {
    return filter;
  }
  /**
   * Allows filtering by advertiser properties. Supported syntax: * Filter expressions are made
   * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
   * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
   * of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
   * `advertiserId` - `displayName` - `entityStatus` Examples: * All active advertisers under a
   * partner: `entityStatus="ENTITY_STATUS_ACTIVE"` The length of this field should be no more
   * than 500 characters.
   */
  public List setFilter(java.lang.String filter) {
    this.filter = filter;
    return this;
  }
  /**
   * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
   * `entityStatus` The default sorting order is ascending. To specify descending order for a
   * field, a suffix "desc" should be added to the field name. For example, `displayName desc`.
   */
  @com.google.api.client.util.Key
  private java.lang.String orderBy;
  /** Field by which to sort the list. Acceptable values are: * `displayName` (default) * `entityStatus`
 The default sorting order is ascending. To specify descending order for a field, a suffix "desc"
 should be added to the field name. For example, `displayName desc`.
   */
  public java.lang.String getOrderBy() {
    return orderBy;
  }
  /**
   * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
   * `entityStatus` The default sorting order is ascending. To specify descending order for a
   * field, a suffix "desc" should be added to the field name. For example, `displayName desc`.
   */
  public List setOrderBy(java.lang.String orderBy) {
    this.orderBy = orderBy;
    return this;
  }
  /**
   * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer pageSize;
  /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
   */
  public java.lang.Integer getPageSize() {
    return pageSize;
  }
  /**
   * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
   */
  public List setPageSize(java.lang.Integer pageSize) {
    this.pageSize = pageSize;
    return this;
  }
  /**
   * A token identifying a page of results the server should return. Typically, this is the
   * value of next_page_token returned from the previous call to `ListAdvertisers` method. If
   * not specified, the first page of results will be returned.
   */
  @com.google.api.client.util.Key
  private java.lang.String pageToken;
  /** A token identifying a page of results the server should return. Typically, this is the value of
 next_page_token returned from the previous call to `ListAdvertisers` method. If not specified, the
 first page of results will be returned.
   */
  public java.lang.String getPageToken() {
    return pageToken;
  }
  /**
   * A token identifying a page of results the server should return. Typically, this is the
   * value of next_page_token returned from the previous call to `ListAdvertisers` method. If
   * not specified, the first page of results will be returned.
   */
  public List setPageToken(java.lang.String pageToken) {
    this.pageToken = pageToken;
    return this;
  }
  /**
   * Required. The ID of the partner that the fetched advertisers should all belong to. The
   * system only supports listing advertisers for one partner at a time.
   */
  @com.google.api.client.util.Key
  private java.lang.Long partnerId;
  /** Required. The ID of the partner that the fetched advertisers should all belong to. The system only
 supports listing advertisers for one partner at a time.
   */
  public java.lang.Long getPartnerId() {
    return partnerId;
  }
  /**
   * Required. The ID of the partner that the fetched advertisers should all belong to. The
   * system only supports listing advertisers for one partner at a time.
   */
  public List setPartnerId(java.lang.Long partnerId) {
    this.partnerId = partnerId;
    return this;
  }
  @Override
  public List set(String parameterName, Object value) {
    return (List) super.set(parameterName, value);
  }
}
/**
 * Builds a request for the "advertisers.patch" method, which updates an existing advertiser and
 * returns the updated advertiser if successful.
 *
 * The returned request carries the parameters needed by the displayvideo server. Set any
 * optional parameters, then call {@link Patch#execute()} to invoke the remote operation.
 *
 * @param advertiserId Output only. The unique ID of the advertiser. Assigned by the system.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.Advertiser}
 * @return the request
 */
public Patch patch(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Advertiser content) throws java.io.IOException {
  Patch request = new Patch(advertiserId, content);
  initialize(request);
  return request;
}
/** Request type for the "advertisers.patch" method; instances are built via {@code patch(...)}. */
public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Advertiser> {
  private static final String REST_PATH = "v1/advertisers/{+advertiserId}";
  // NOTE(review): not referenced in the visible code — presumably emitted by the code generator
  // to validate the {+advertiserId} path segment; confirm before removing.
  private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
      java.util.regex.Pattern.compile("^[^/]+$");
  /**
   * Updates an existing advertiser. Returns the updated advertiser if successful.
   *
   * Create a request for the method "advertisers.patch".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
   * <p> {@link
   * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param advertiserId Output only. The unique ID of the advertiser. Assigned by the system.
   * @param content the {@link com.google.api.services.displayvideo.v1.model.Advertiser}
   * @since 1.13
   */
  protected Patch(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Advertiser content) {
    super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Advertiser.class);
    this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
  }
  @Override
  public Patch set$Xgafv(java.lang.String $Xgafv) {
    return (Patch) super.set$Xgafv($Xgafv);
  }
  @Override
  public Patch setAccessToken(java.lang.String accessToken) {
    return (Patch) super.setAccessToken(accessToken);
  }
  @Override
  public Patch setAlt(java.lang.String alt) {
    return (Patch) super.setAlt(alt);
  }
  @Override
  public Patch setCallback(java.lang.String callback) {
    return (Patch) super.setCallback(callback);
  }
  @Override
  public Patch setFields(java.lang.String fields) {
    return (Patch) super.setFields(fields);
  }
  @Override
  public Patch setKey(java.lang.String key) {
    return (Patch) super.setKey(key);
  }
  @Override
  public Patch setOauthToken(java.lang.String oauthToken) {
    return (Patch) super.setOauthToken(oauthToken);
  }
  @Override
  public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Patch) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public Patch setQuotaUser(java.lang.String quotaUser) {
    return (Patch) super.setQuotaUser(quotaUser);
  }
  @Override
  public Patch setUploadType(java.lang.String uploadType) {
    return (Patch) super.setUploadType(uploadType);
  }
  @Override
  public Patch setUploadProtocol(java.lang.String uploadProtocol) {
    return (Patch) super.setUploadProtocol(uploadProtocol);
  }
  /** Output only. The unique ID of the advertiser. Assigned by the system. */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;
  /** Output only. The unique ID of the advertiser. Assigned by the system.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }
  /** Output only. The unique ID of the advertiser. Assigned by the system. */
  public Patch setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }
  /** Required. The mask to control which fields to update. */
  @com.google.api.client.util.Key
  private String updateMask;
  /** Required. The mask to control which fields to update.
   */
  public String getUpdateMask() {
    return updateMask;
  }
  /** Required. The mask to control which fields to update. */
  public Patch setUpdateMask(String updateMask) {
    this.updateMask = updateMask;
    return this;
  }
  @Override
  public Patch set(String parameterName, Object value) {
    return (Patch) super.set(parameterName, value);
  }
}
/**
 * An accessor for creating requests from the Assets collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.Assets.List request = displayvideo.assets().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public Assets assets() {
  Assets collection = new Assets();
  return collection;
}
/**
 * The "assets" collection of methods.
 */
public class Assets {
  /**
   * Uploads an asset. Returns the ID of the newly uploaded asset if successful. The asset file size
   * should be no more than 10 MB for images, 200 MB for ZIP files, and 1 GB for videos.
   *
   * Create a request for the method "assets.upload".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any optional
   * parameters, call the {@link Upload#execute()} method to invoke the remote operation.
   *
   * @param advertiserId Required. The ID of the advertiser this asset belongs to.
   * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateAssetRequest}
   * @return the request
   */
  public Upload upload(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.CreateAssetRequest content) throws java.io.IOException {
    Upload result = new Upload(advertiserId, content);
    initialize(result);
    return result;
  }
  /**
   * Uploads an asset. Returns the ID of the newly uploaded asset if successful. The asset file size
   * should be no more than 10 MB for images, 200 MB for ZIP files, and 1 GB for videos.
   *
   * Create a request for the method "assets.upload".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
   *
   * <p>
   * This method should be used for uploading media content.
   * </p>
   *
   * @param advertiserId Required. The ID of the advertiser this asset belongs to.
   * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateAssetRequest} media metadata or {@code null} if none
   * @param mediaContent The media HTTP content or {@code null} if none.
   * @return the request
   * @throws java.io.IOException if the initialization of the request fails
   */
  public Upload upload(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.CreateAssetRequest content, com.google.api.client.http.AbstractInputStreamContent mediaContent) throws java.io.IOException {
    Upload result = new Upload(advertiserId, content, mediaContent);
    initialize(result);
    return result;
  }
  /** Request type for the "assets.upload" method; instances are built via {@code upload(...)}. */
  public class Upload extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.CreateAssetResponse> {
    private static final String REST_PATH = "v1/advertisers/{+advertiserId}/assets";
    // NOTE(review): not referenced in the visible code — presumably emitted by the code generator
    // to validate the {+advertiserId} path segment; confirm before removing.
    private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
        java.util.regex.Pattern.compile("^[^/]+$");
    /**
     * Uploads an asset. Returns the ID of the newly uploaded asset if successful. The asset file size
     * should be no more than 10 MB for images, 200 MB for ZIP files, and 1 GB for videos.
     *
     * Create a request for the method "assets.upload".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any
     * optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
     * <p> {@link
     * Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
     * be called to initialize this instance immediately after invoking the constructor. </p>
     *
     * @param advertiserId Required. The ID of the advertiser this asset belongs to.
     * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateAssetRequest}
     * @since 1.13
     */
    protected Upload(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.CreateAssetRequest content) {
      super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.CreateAssetResponse.class);
      this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
    }
    /**
     * Uploads an asset. Returns the ID of the newly uploaded asset if successful. The asset file size
     * should be no more than 10 MB for images, 200 MB for ZIP files, and 1 GB for videos.
     *
     * Create a request for the method "assets.upload".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any
     * optional parameters, call the {@link Upload#execute()} method to invoke the remote operation.
     * <p> {@link
     * Upload#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
     * be called to initialize this instance immediately after invoking the constructor. </p>
     *
     * <p>
     * This constructor should be used for uploading media content.
     * </p>
     *
     * @param advertiserId Required. The ID of the advertiser this asset belongs to.
     * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateAssetRequest} media metadata or {@code null} if none
     * @param mediaContent The media HTTP content or {@code null} if none.
     * @since 1.13
     */
    protected Upload(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.CreateAssetRequest content, com.google.api.client.http.AbstractInputStreamContent mediaContent) {
      super(DisplayVideo.this, "POST", "/upload/" + getServicePath() + REST_PATH, content, com.google.api.services.displayvideo.v1.model.CreateAssetResponse.class);
      this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      initializeMediaUpload(mediaContent);
    }
    @Override
    public Upload set$Xgafv(java.lang.String $Xgafv) {
      return (Upload) super.set$Xgafv($Xgafv);
    }
    @Override
    public Upload setAccessToken(java.lang.String accessToken) {
      return (Upload) super.setAccessToken(accessToken);
    }
    @Override
    public Upload setAlt(java.lang.String alt) {
      return (Upload) super.setAlt(alt);
    }
    @Override
    public Upload setCallback(java.lang.String callback) {
      return (Upload) super.setCallback(callback);
    }
    @Override
    public Upload setFields(java.lang.String fields) {
      return (Upload) super.setFields(fields);
    }
    @Override
    public Upload setKey(java.lang.String key) {
      return (Upload) super.setKey(key);
    }
    @Override
    public Upload setOauthToken(java.lang.String oauthToken) {
      return (Upload) super.setOauthToken(oauthToken);
    }
    @Override
    public Upload setPrettyPrint(java.lang.Boolean prettyPrint) {
      return (Upload) super.setPrettyPrint(prettyPrint);
    }
    @Override
    public Upload setQuotaUser(java.lang.String quotaUser) {
      return (Upload) super.setQuotaUser(quotaUser);
    }
    @Override
    public Upload setUploadType(java.lang.String uploadType) {
      return (Upload) super.setUploadType(uploadType);
    }
    @Override
    public Upload setUploadProtocol(java.lang.String uploadProtocol) {
      return (Upload) super.setUploadProtocol(uploadProtocol);
    }
    /** Required. The ID of the advertiser this asset belongs to. */
    @com.google.api.client.util.Key
    private java.lang.Long advertiserId;
    /** Required. The ID of the advertiser this asset belongs to.
     */
    public java.lang.Long getAdvertiserId() {
      return advertiserId;
    }
    /** Required. The ID of the advertiser this asset belongs to. */
    public Upload setAdvertiserId(java.lang.Long advertiserId) {
      this.advertiserId = advertiserId;
      return this;
    }
    @Override
    public Upload set(String parameterName, Object value) {
      return (Upload) super.set(parameterName, value);
    }
  }
}
/**
 * An accessor for creating requests from the Campaigns collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.Campaigns.List request = displayvideo.campaigns().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public Campaigns campaigns() {
  Campaigns collection = new Campaigns();
  return collection;
}
/**
* The "campaigns" collection of methods.
*/
public class Campaigns {
/**
 * Builds a request for the "campaigns.create" method, which creates a new campaign and returns
 * the newly created campaign if successful.
 *
 * The returned request carries the parameters needed by the displayvideo server. Set any
 * optional parameters, then call {@link Create#execute()} to invoke the remote operation.
 *
 * @param advertiserId Output only. The unique ID of the advertiser the campaign belongs to.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.Campaign}
 * @return the request
 */
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Campaign content) throws java.io.IOException {
  Create request = new Create(advertiserId, content);
  initialize(request);
  return request;
}
/** Request type for the "campaigns.create" method; instances are built via {@code create(...)}. */
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Campaign> {
  private static final String REST_PATH = "v1/advertisers/{+advertiserId}/campaigns";
  // NOTE(review): not referenced in the visible code — presumably emitted by the code generator
  // to validate the {+advertiserId} path segment; confirm before removing.
  private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
      java.util.regex.Pattern.compile("^[^/]+$");
  /**
   * Creates a new campaign. Returns the newly created campaign if successful.
   *
   * Create a request for the method "campaigns.create".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
   * <p> {@link
   * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param advertiserId Output only. The unique ID of the advertiser the campaign belongs to.
   * @param content the {@link com.google.api.services.displayvideo.v1.model.Campaign}
   * @since 1.13
   */
  protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Campaign content) {
    super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Campaign.class);
    this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
  }
  @Override
  public Create set$Xgafv(java.lang.String $Xgafv) {
    return (Create) super.set$Xgafv($Xgafv);
  }
  @Override
  public Create setAccessToken(java.lang.String accessToken) {
    return (Create) super.setAccessToken(accessToken);
  }
  @Override
  public Create setAlt(java.lang.String alt) {
    return (Create) super.setAlt(alt);
  }
  @Override
  public Create setCallback(java.lang.String callback) {
    return (Create) super.setCallback(callback);
  }
  @Override
  public Create setFields(java.lang.String fields) {
    return (Create) super.setFields(fields);
  }
  @Override
  public Create setKey(java.lang.String key) {
    return (Create) super.setKey(key);
  }
  @Override
  public Create setOauthToken(java.lang.String oauthToken) {
    return (Create) super.setOauthToken(oauthToken);
  }
  @Override
  public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Create) super.setPrettyPrint(prettyPrint);
  }
  @Override
  public Create setQuotaUser(java.lang.String quotaUser) {
    return (Create) super.setQuotaUser(quotaUser);
  }
  @Override
  public Create setUploadType(java.lang.String uploadType) {
    return (Create) super.setUploadType(uploadType);
  }
  @Override
  public Create setUploadProtocol(java.lang.String uploadProtocol) {
    return (Create) super.setUploadProtocol(uploadProtocol);
  }
  /** Output only. The unique ID of the advertiser the campaign belongs to. */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;
  /** Output only. The unique ID of the advertiser the campaign belongs to.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }
  /** Output only. The unique ID of the advertiser the campaign belongs to. */
  public Create setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }
  @Override
  public Create set(String parameterName, Object value) {
    return (Create) super.set(parameterName, value);
  }
}
/**
 * Builds a request for the "campaigns.delete" method, which permanently deletes a campaign. A
 * deleted campaign cannot be recovered. The campaign should be archived first, i.e. set
 * entity_status to `ENTITY_STATUS_ARCHIVED`, to be able to delete it.
 *
 * The returned request carries the parameters needed by the displayvideo server. Set any
 * optional parameters, then call {@link Delete#execute()} to invoke the remote operation.
 *
 * @param advertiserId The ID of the advertiser this campaign belongs to.
 * @param campaignId The ID of the campaign we need to delete.
 * @return the request
 */
public Delete delete(java.lang.Long advertiserId, java.lang.Long campaignId) throws java.io.IOException {
  Delete request = new Delete(advertiserId, campaignId);
  initialize(request);
  return request;
}
    public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/campaigns/{+campaignId}";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern CAMPAIGN_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Permanently deletes a campaign. A deleted campaign cannot be recovered. The campaign should be
       * archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`, to be able to delete it.
       *
       * Create a request for the method "campaigns.delete".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
       * <p> {@link
       * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser this campaign belongs to.
       * @param campaignId The ID of the campaign we need to delete.
       * @since 1.13
       */
      protected Delete(java.lang.Long advertiserId, java.lang.Long campaignId) {
        super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.campaignId = com.google.api.client.util.Preconditions.checkNotNull(campaignId, "Required parameter campaignId must be specified.");
      }
      @Override
      public Delete set$Xgafv(java.lang.String $Xgafv) {
        return (Delete) super.set$Xgafv($Xgafv);
      }
      @Override
      public Delete setAccessToken(java.lang.String accessToken) {
        return (Delete) super.setAccessToken(accessToken);
      }
      @Override
      public Delete setAlt(java.lang.String alt) {
        return (Delete) super.setAlt(alt);
      }
      @Override
      public Delete setCallback(java.lang.String callback) {
        return (Delete) super.setCallback(callback);
      }
      @Override
      public Delete setFields(java.lang.String fields) {
        return (Delete) super.setFields(fields);
      }
      @Override
      public Delete setKey(java.lang.String key) {
        return (Delete) super.setKey(key);
      }
      @Override
      public Delete setOauthToken(java.lang.String oauthToken) {
        return (Delete) super.setOauthToken(oauthToken);
      }
      @Override
      public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Delete) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Delete setQuotaUser(java.lang.String quotaUser) {
        return (Delete) super.setQuotaUser(quotaUser);
      }
      @Override
      public Delete setUploadType(java.lang.String uploadType) {
        return (Delete) super.setUploadType(uploadType);
      }
      @Override
      public Delete setUploadProtocol(java.lang.String uploadProtocol) {
        return (Delete) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the advertiser this campaign belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser this campaign belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser this campaign belongs to. */
      public Delete setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** The ID of the campaign we need to delete. */
      @com.google.api.client.util.Key
      private java.lang.Long campaignId;
      /** The ID of the campaign we need to delete.
       */
      public java.lang.Long getCampaignId() {
        return campaignId;
      }
      /** The ID of the campaign we need to delete. */
      public Delete setCampaignId(java.lang.Long campaignId) {
        this.campaignId = campaignId;
        return this;
      }
      @Override
      public Delete set(String parameterName, Object value) {
        return (Delete) super.set(parameterName, value);
      }
    }
/**
* Gets a campaign.
*
* Create a request for the method "campaigns.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser this campaign belongs to.
* @param campaignId Required. The ID of the campaign to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long campaignId) throws java.io.IOException {
Get result = new Get(advertiserId, campaignId);
initialize(result);
return result;
}
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Campaign> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/campaigns/{+campaignId}";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern CAMPAIGN_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a campaign.
       *
       * Create a request for the method "campaigns.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser this campaign belongs to.
       * @param campaignId Required. The ID of the campaign to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long advertiserId, java.lang.Long campaignId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Campaign.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.campaignId = com.google.api.client.util.Preconditions.checkNotNull(campaignId, "Required parameter campaignId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser this campaign belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser this campaign belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser this campaign belongs to. */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the campaign to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long campaignId;
      /** Required. The ID of the campaign to fetch.
       */
      public java.lang.Long getCampaignId() {
        return campaignId;
      }
      /** Required. The ID of the campaign to fetch. */
      public Get setCampaignId(java.lang.Long campaignId) {
        this.campaignId = campaignId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
/**
* Lists campaigns in an advertiser. The order is defined by the order_by parameter. If a filter by
* entity_status is not specified, campaigns with `ENTITY_STATUS_ARCHIVED` will not be included in
* the results.
*
* Create a request for the method "campaigns.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser to list campaigns for.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListCampaignsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/campaigns";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists campaigns in an advertiser. The order is defined by the order_by parameter. If a filter
       * by entity_status is not specified, campaigns with `ENTITY_STATUS_ARCHIVED` will not be included
       * in the results.
       *
       * Create a request for the method "campaigns.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser to list campaigns for.
       * @since 1.13
       */
      protected List(java.lang.Long advertiserId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListCampaignsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the advertiser to list campaigns for. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser to list campaigns for.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser to list campaigns for. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by campaign properties. Supported syntax: * Filter expressions are made
       * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
       * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
       * of `{field} {operator} {value}`. * The operator used on `updateTime` must be `GREATER
       * THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The operator must be `EQUALS
       * (=)`. * Supported fields: - `campaignId` - `displayName` - `entityStatus` - `updateTime`
       * (input in ISO 8601 format, or YYYY-MM-DDTHH:MM:SSZ) Examples: * All
       * `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED` campaigns under an advertiser:
       * `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED")` * All
       * campaigns with an update time less than or equal to `2020-11-04T18:54:47Z (format of ISO
       * 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All campaigns with an update time greater
       * than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
       * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
       * characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /**
       * Allows filtering by campaign properties. Supported syntax: * Filter expressions are made
       * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
       * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
       * of `{field} {operator} {value}`. * The operator used on `updateTime` must be `GREATER
       * THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The operator must be `EQUALS
       * (=)`. * Supported fields: - `campaignId` - `displayName` - `entityStatus` - `updateTime`
       * (input in ISO 8601 format, or YYYY-MM-DDTHH:MM:SSZ) Examples: * All
       * `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED` campaigns under an advertiser:
       * `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED")` * All
       * campaigns with an update time less than or equal to `2020-11-04T18:54:47Z (format of ISO
       * 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All campaigns with an update time greater
       * than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
       * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
       * characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by campaign properties. Supported syntax: * Filter expressions are made
       * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
       * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
       * of `{field} {operator} {value}`. * The operator used on `updateTime` must be `GREATER
       * THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The operator must be `EQUALS
       * (=)`. * Supported fields: - `campaignId` - `displayName` - `entityStatus` - `updateTime`
       * (input in ISO 8601 format, or YYYY-MM-DDTHH:MM:SSZ) Examples: * All
       * `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED` campaigns under an advertiser:
       * `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED")` * All
       * campaigns with an update time less than or equal to `2020-11-04T18:54:47Z (format of ISO
       * 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All campaigns with an update time greater
       * than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
       * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
       * characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `entityStatus` * `updateTime` The default sorting order is ascending. To specify
       * descending order for a field, a suffix "desc" should be added to the field name. Example:
       * `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `entityStatus` * `updateTime` The default sorting order is ascending. To specify
       * descending order for a field, a suffix "desc" should be added to the field name. Example:
       * `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `entityStatus` * `updateTime` The default sorting order is ascending. To specify
       * descending order for a field, a suffix "desc" should be added to the field name. Example:
       * `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCampaigns` method. If
       * not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCampaigns` method. If
       * not specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCampaigns` method. If
       * not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
/**
* Updates an existing campaign. Returns the updated campaign if successful.
*
* Create a request for the method "campaigns.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Output only. The unique ID of the advertiser the campaign belongs to.
* @param campaignId Output only. The unique ID of the campaign. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Campaign}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long campaignId, com.google.api.services.displayvideo.v1.model.Campaign content) throws java.io.IOException {
Patch result = new Patch(advertiserId, campaignId, content);
initialize(result);
return result;
}
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Campaign> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/campaigns/{+campaignId}";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern CAMPAIGN_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates an existing campaign. Returns the updated campaign if successful.
       *
       * Create a request for the method "campaigns.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Output only. The unique ID of the advertiser the campaign belongs to.
       * @param campaignId Output only. The unique ID of the campaign. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Campaign}
       * @since 1.13
       */
      protected Patch(java.lang.Long advertiserId, java.lang.Long campaignId, com.google.api.services.displayvideo.v1.model.Campaign content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Campaign.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.campaignId = com.google.api.client.util.Preconditions.checkNotNull(campaignId, "Required parameter campaignId must be specified.");
      }
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      /** Output only. The unique ID of the advertiser the campaign belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Output only. The unique ID of the advertiser the campaign belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Output only. The unique ID of the advertiser the campaign belongs to. */
      public Patch setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Output only. The unique ID of the campaign. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long campaignId;
      /** Output only. The unique ID of the campaign. Assigned by the system.
       */
      public java.lang.Long getCampaignId() {
        return campaignId;
      }
      /** Output only. The unique ID of the campaign. Assigned by the system. */
      public Patch setCampaignId(java.lang.Long campaignId) {
        this.campaignId = campaignId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update.
       */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      @Override
      public Patch set(String parameterName, Object value) {
        return (Patch) super.set(parameterName, value);
      }
    }
}
/**
* An accessor for creating requests from the Channels collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Channels.List request = displayvideo.channels().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Channels channels() {
return new Channels();
}
/**
* The "channels" collection of methods.
*/
public class Channels {
/**
* Creates a new channel. Returns the newly created channel if successful.
*
* Create a request for the method "channels.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the created channel.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
* @return the request
*/
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Channel content) throws java.io.IOException {
Create result = new Create(advertiserId, content);
initialize(result);
return result;
}
    public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/channels";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Creates a new channel. Returns the newly created channel if successful.
       *
       * Create a request for the method "channels.create".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
       * <p> {@link
       * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the created channel.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
       * @since 1.13
       */
      protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Channel content) {
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Channel.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public Create set$Xgafv(java.lang.String $Xgafv) {
        return (Create) super.set$Xgafv($Xgafv);
      }
      @Override
      public Create setAccessToken(java.lang.String accessToken) {
        return (Create) super.setAccessToken(accessToken);
      }
      @Override
      public Create setAlt(java.lang.String alt) {
        return (Create) super.setAlt(alt);
      }
      @Override
      public Create setCallback(java.lang.String callback) {
        return (Create) super.setCallback(callback);
      }
      @Override
      public Create setFields(java.lang.String fields) {
        return (Create) super.setFields(fields);
      }
      @Override
      public Create setKey(java.lang.String key) {
        return (Create) super.setKey(key);
      }
      @Override
      public Create setOauthToken(java.lang.String oauthToken) {
        return (Create) super.setOauthToken(oauthToken);
      }
      @Override
      public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Create) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Create setQuotaUser(java.lang.String quotaUser) {
        return (Create) super.setQuotaUser(quotaUser);
      }
      @Override
      public Create setUploadType(java.lang.String uploadType) {
        return (Create) super.setUploadType(uploadType);
      }
      @Override
      public Create setUploadProtocol(java.lang.String uploadProtocol) {
        return (Create) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the advertiser that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the created channel.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the created channel. */
      public Create setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** The ID of the partner that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the created channel.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the created channel. */
      public Create setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Create set(String parameterName, Object value) {
        return (Create) super.set(parameterName, value);
      }
    }
/**
* Gets a channel for a partner or advertiser.
*
* Create a request for the method "channels.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the fetched channel.
* @param channelId Required. The ID of the channel to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long channelId) throws java.io.IOException {
Get result = new Get(advertiserId, channelId);
initialize(result);
return result;
}
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/channels/{+channelId}";
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a channel for a partner or advertiser.
       *
       * Create a request for the method "channels.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the fetched channel.
       * @param channelId Required. The ID of the channel to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long advertiserId, java.lang.Long channelId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Channel.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the advertiser that owns the fetched channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the fetched channel.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the fetched channel. */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the channel to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Required. The ID of the channel to fetch.
       */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Required. The ID of the channel to fetch. */
      public Get setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** The ID of the partner that owns the fetched channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the fetched channel.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the fetched channel. */
      public Get setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
/**
* Lists channels for a partner or advertiser.
*
* Create a request for the method "channels.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the channels.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListChannelsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/channels";
      // NOTE(review): this pattern field appears unused in this class — regex checks seem to be
      // skipped for numeric (Long) path parameters; confirm against the code generator.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists channels for a partner or advertiser.
       *
       * Create a request for the method "channels.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the channels.
       * @since 1.13
       */
      protected List(java.lang.Long advertiserId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListChannelsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // Standard system-parameter setters, overridden only to narrow the return type to List
      // so calls can be chained fluently.
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      // Method-specific parameters below; @Key marks each for URL encoding by the client library.
      /** The ID of the advertiser that owns the channels. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the channels.
     */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the channels. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by channel fields. Supported syntax: * Filter expressions for channel
       * currently can only contain at most one * restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields:
       * - `displayName` Examples: * All channels for which the display name contains "google":
       * `displayName : "google"`. The length of this field should be no more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /** Allows filtering by channel fields. Supported syntax: * Filter expressions for channel currently
     can only contain at most one * restriction. * A restriction has the form of `{field} {operator}
     {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: - `displayName` Examples: *
     All channels for which the display name contains "google": `displayName : "google"`. The length of
     this field should be no more than 500 characters.
     */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by channel fields. Supported syntax: * Filter expressions for channel
       * currently can only contain at most one * restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields:
       * - `displayName` Examples: * All channels for which the display name contains "google":
       * `displayName : "google"`. The length of this field should be no more than 500 characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `channelId` The default sorting order is ascending. To specify descending order for a
       * field, a suffix " desc" should be added to the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /** Field by which to sort the list. Acceptable values are: * `displayName` (default) * `channelId` The
     default sorting order is ascending. To specify descending order for a field, a suffix " desc"
     should be added to the field name. Example: `displayName desc`.
     */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `channelId` The default sorting order is ascending. To specify descending order for a
       * field, a suffix " desc" should be added to the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
     error code `INVALID_ARGUMENT` if an invalid value is specified.
     */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListChannels` method. If not
       * specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListChannels` method. If not specified, the
     first page of results will be returned.
     */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListChannels` method. If not
       * specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      /** The ID of the partner that owns the channels. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the channels.
     */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the channels. */
      public List setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
/**
* Updates a channel. Returns the updated channel if successful.
*
* Create a request for the method "channels.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the created channel.
* @param channelId Output only. The unique ID of the channel. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Channel content) throws java.io.IOException {
Patch result = new Patch(advertiserId, channelId, content);
initialize(result);
return result;
}
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/channels/{channelId}";
      // NOTE(review): this pattern field appears unused in this class — regex checks seem to be
      // skipped for numeric (Long) path parameters; confirm against the code generator.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates a channel. Returns the updated channel if successful.
       *
       * Create a request for the method "channels.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the created channel.
       * @param channelId Output only. The unique ID of the channel. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
       * @since 1.13
       */
      protected Patch(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Channel content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Channel.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
      }
      // Standard system-parameter setters, overridden only to narrow the return type to Patch
      // so calls can be chained fluently.
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      // Method-specific parameters below; @Key marks each for URL encoding by the client library.
      /** The ID of the advertiser that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the created channel.
     */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the created channel. */
      public Patch setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Output only. The unique ID of the channel. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Output only. The unique ID of the channel. Assigned by the system.
     */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Output only. The unique ID of the channel. Assigned by the system. */
      public Patch setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** The ID of the partner that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the created channel.
     */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the created channel. */
      public Patch setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update.
     */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      @Override
      public Patch set(String parameterName, Object value) {
        return (Patch) super.set(parameterName, value);
      }
    }
/**
* An accessor for creating requests from the Sites collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Sites.List request = displayvideo.sites().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Sites sites() {
return new Sites();
}
/**
* The "sites" collection of methods.
*/
public class Sites {
/**
* Bulk edits sites under a single channel. The operation will delete the sites provided in
* BulkEditSitesRequest.deleted_sites and then create the sites provided in
* BulkEditSitesRequest.created_sites.
*
* Create a request for the method "sites.bulkEdit".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the sites belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest}
* @return the request
*/
public BulkEdit bulkEdit(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest content) throws java.io.IOException {
BulkEdit result = new BulkEdit(advertiserId, channelId, content);
initialize(result);
return result;
}
    public class BulkEdit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditSitesResponse> {
      private static final String REST_PATH = "v1/advertisers/{advertiserId}/channels/{+channelId}/sites:bulkEdit";
      // NOTE(review): this pattern field appears unused in this class — regex checks seem to be
      // skipped for numeric (Long) path parameters; confirm against the code generator.
      private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Bulk edits sites under a single channel. The operation will delete the sites provided in
       * BulkEditSitesRequest.deleted_sites and then create the sites provided in
       * BulkEditSitesRequest.created_sites.
       *
       * Create a request for the method "sites.bulkEdit".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
       * <p> {@link
       * BulkEdit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the parent channel.
       * @param channelId Required. The ID of the parent channel to which the sites belong.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest}
       * @since 1.13
       */
      protected BulkEdit(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest content) {
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditSitesResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
      }
      // Standard system-parameter setters, overridden only to narrow the return type to BulkEdit
      // so calls can be chained fluently.
      @Override
      public BulkEdit set$Xgafv(java.lang.String $Xgafv) {
        return (BulkEdit) super.set$Xgafv($Xgafv);
      }
      @Override
      public BulkEdit setAccessToken(java.lang.String accessToken) {
        return (BulkEdit) super.setAccessToken(accessToken);
      }
      @Override
      public BulkEdit setAlt(java.lang.String alt) {
        return (BulkEdit) super.setAlt(alt);
      }
      @Override
      public BulkEdit setCallback(java.lang.String callback) {
        return (BulkEdit) super.setCallback(callback);
      }
      @Override
      public BulkEdit setFields(java.lang.String fields) {
        return (BulkEdit) super.setFields(fields);
      }
      @Override
      public BulkEdit setKey(java.lang.String key) {
        return (BulkEdit) super.setKey(key);
      }
      @Override
      public BulkEdit setOauthToken(java.lang.String oauthToken) {
        return (BulkEdit) super.setOauthToken(oauthToken);
      }
      @Override
      public BulkEdit setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (BulkEdit) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public BulkEdit setQuotaUser(java.lang.String quotaUser) {
        return (BulkEdit) super.setQuotaUser(quotaUser);
      }
      @Override
      public BulkEdit setUploadType(java.lang.String uploadType) {
        return (BulkEdit) super.setUploadType(uploadType);
      }
      @Override
      public BulkEdit setUploadProtocol(java.lang.String uploadProtocol) {
        return (BulkEdit) super.setUploadProtocol(uploadProtocol);
      }
      // Method-specific parameters below; @Key marks each for URL encoding by the client library.
      /** The ID of the advertiser that owns the parent channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the parent channel.
     */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the parent channel. */
      public BulkEdit setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the parent channel to which the sites belong. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Required. The ID of the parent channel to which the sites belong.
     */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Required. The ID of the parent channel to which the sites belong. */
      public BulkEdit setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      @Override
      public BulkEdit set(String parameterName, Object value) {
        return (BulkEdit) super.set(parameterName, value);
      }
    }
/**
* Creates a site in a channel.
*
* Create a request for the method "sites.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the parent channel.
* @param channelId Required. The ID of the parent channel in which the site will be created.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Site}
* @return the request
*/
public Create create(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Site content) throws java.io.IOException {
Create result = new Create(advertiserId, channelId, content);
initialize(result);
return result;
}
    public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Site> {
      private static final String REST_PATH = "v1/advertisers/{advertiserId}/channels/{+channelId}/sites";
      // NOTE(review): this pattern field appears unused in this class — regex checks seem to be
      // skipped for numeric (Long) path parameters; confirm against the code generator.
      private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Creates a site in a channel.
       *
       * Create a request for the method "sites.create".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
       * <p> {@link
       * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the parent channel.
       * @param channelId Required. The ID of the parent channel in which the site will be created.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Site}
       * @since 1.13
       */
      protected Create(java.lang.Long advertiserId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Site content) {
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Site.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
      }
      // Standard system-parameter setters, overridden only to narrow the return type to Create
      // so calls can be chained fluently.
      @Override
      public Create set$Xgafv(java.lang.String $Xgafv) {
        return (Create) super.set$Xgafv($Xgafv);
      }
      @Override
      public Create setAccessToken(java.lang.String accessToken) {
        return (Create) super.setAccessToken(accessToken);
      }
      @Override
      public Create setAlt(java.lang.String alt) {
        return (Create) super.setAlt(alt);
      }
      @Override
      public Create setCallback(java.lang.String callback) {
        return (Create) super.setCallback(callback);
      }
      @Override
      public Create setFields(java.lang.String fields) {
        return (Create) super.setFields(fields);
      }
      @Override
      public Create setKey(java.lang.String key) {
        return (Create) super.setKey(key);
      }
      @Override
      public Create setOauthToken(java.lang.String oauthToken) {
        return (Create) super.setOauthToken(oauthToken);
      }
      @Override
      public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Create) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Create setQuotaUser(java.lang.String quotaUser) {
        return (Create) super.setQuotaUser(quotaUser);
      }
      @Override
      public Create setUploadType(java.lang.String uploadType) {
        return (Create) super.setUploadType(uploadType);
      }
      @Override
      public Create setUploadProtocol(java.lang.String uploadProtocol) {
        return (Create) super.setUploadProtocol(uploadProtocol);
      }
      // Method-specific parameters below; @Key marks each for URL encoding by the client library.
      /** The ID of the advertiser that owns the parent channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the parent channel.
     */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the parent channel. */
      public Create setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the parent channel in which the site will be created. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Required. The ID of the parent channel in which the site will be created.
     */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Required. The ID of the parent channel in which the site will be created. */
      public Create setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** The ID of the partner that owns the parent channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the parent channel.
     */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the parent channel. */
      public Create setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Create set(String parameterName, Object value) {
        return (Create) super.set(parameterName, value);
      }
    }
/**
* Deletes a site from a channel.
*
* Create a request for the method "sites.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the site belongs.
* @param urlOrAppId Required. The URL or app ID of the site to delete.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long channelId, java.lang.String urlOrAppId) throws java.io.IOException {
Delete result = new Delete(advertiserId, channelId, urlOrAppId);
initialize(result);
return result;
}
    public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
      private static final String REST_PATH = "v1/advertisers/{advertiserId}/channels/{+channelId}/sites/{+urlOrAppId}";
      // NOTE(review): CHANNEL_ID_PATTERN appears unused in this class — regex checks seem to be
      // skipped for numeric (Long) path parameters; URL_OR_APP_ID_PATTERN is enforced below.
      private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern URL_OR_APP_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Deletes a site from a channel.
       *
       * Create a request for the method "sites.delete".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
       * <p> {@link
       * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser that owns the parent channel.
       * @param channelId Required. The ID of the parent channel to which the site belongs.
       * @param urlOrAppId Required. The URL or app ID of the site to delete.
       * @since 1.13
       */
      protected Delete(java.lang.Long advertiserId, java.lang.Long channelId, java.lang.String urlOrAppId) {
        super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
        this.urlOrAppId = com.google.api.client.util.Preconditions.checkNotNull(urlOrAppId, "Required parameter urlOrAppId must be specified.");
        // urlOrAppId is a string path segment, so it must not contain '/' unless checks are suppressed.
        if (!getSuppressPatternChecks()) {
          com.google.api.client.util.Preconditions.checkArgument(URL_OR_APP_ID_PATTERN.matcher(urlOrAppId).matches(),
              "Parameter urlOrAppId must conform to the pattern " +
              "^[^/]+$");
        }
      }
      // Standard system-parameter setters, overridden only to narrow the return type to Delete
      // so calls can be chained fluently.
      @Override
      public Delete set$Xgafv(java.lang.String $Xgafv) {
        return (Delete) super.set$Xgafv($Xgafv);
      }
      @Override
      public Delete setAccessToken(java.lang.String accessToken) {
        return (Delete) super.setAccessToken(accessToken);
      }
      @Override
      public Delete setAlt(java.lang.String alt) {
        return (Delete) super.setAlt(alt);
      }
      @Override
      public Delete setCallback(java.lang.String callback) {
        return (Delete) super.setCallback(callback);
      }
      @Override
      public Delete setFields(java.lang.String fields) {
        return (Delete) super.setFields(fields);
      }
      @Override
      public Delete setKey(java.lang.String key) {
        return (Delete) super.setKey(key);
      }
      @Override
      public Delete setOauthToken(java.lang.String oauthToken) {
        return (Delete) super.setOauthToken(oauthToken);
      }
      @Override
      public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Delete) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Delete setQuotaUser(java.lang.String quotaUser) {
        return (Delete) super.setQuotaUser(quotaUser);
      }
      @Override
      public Delete setUploadType(java.lang.String uploadType) {
        return (Delete) super.setUploadType(uploadType);
      }
      @Override
      public Delete setUploadProtocol(java.lang.String uploadProtocol) {
        return (Delete) super.setUploadProtocol(uploadProtocol);
      }
      // Method-specific parameters below; @Key marks each for URL encoding by the client library.
      /** The ID of the advertiser that owns the parent channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the parent channel.
     */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the parent channel. */
      public Delete setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the parent channel to which the site belongs. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Required. The ID of the parent channel to which the site belongs.
     */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Required. The ID of the parent channel to which the site belongs. */
      public Delete setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** Required. The URL or app ID of the site to delete. */
      @com.google.api.client.util.Key
      private java.lang.String urlOrAppId;
      /** Required. The URL or app ID of the site to delete.
     */
      public java.lang.String getUrlOrAppId() {
        return urlOrAppId;
      }
      /** Required. The URL or app ID of the site to delete. */
      public Delete setUrlOrAppId(java.lang.String urlOrAppId) {
        // Same path-segment validation as the constructor, applied on late assignment.
        if (!getSuppressPatternChecks()) {
          com.google.api.client.util.Preconditions.checkArgument(URL_OR_APP_ID_PATTERN.matcher(urlOrAppId).matches(),
              "Parameter urlOrAppId must conform to the pattern " +
              "^[^/]+$");
        }
        this.urlOrAppId = urlOrAppId;
        return this;
      }
      /** The ID of the partner that owns the parent channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the parent channel.
     */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the parent channel. */
      public Delete setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Delete set(String parameterName, Object value) {
        return (Delete) super.set(parameterName, value);
      }
    }
/**
* Lists sites in a channel.
*
* Create a request for the method "sites.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the requested sites belong.
* @return the request
*/
public List list(java.lang.Long advertiserId, java.lang.Long channelId) throws java.io.IOException {
List result = new List(advertiserId, channelId);
initialize(result);
return result;
}
      public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListSitesResponse> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/channels/{+channelId}/sites";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Lists sites in a channel.
         *
         * Create a request for the method "sites.list".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
         * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId The ID of the advertiser that owns the parent channel.
         * @param channelId Required. The ID of the parent channel to which the requested sites belong.
         * @since 1.13
         */
        protected List(java.lang.Long advertiserId, java.lang.Long channelId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListSitesResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
        }
        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }
        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }
        @Override
        public List set$Xgafv(java.lang.String $Xgafv) {
          return (List) super.set$Xgafv($Xgafv);
        }
        @Override
        public List setAccessToken(java.lang.String accessToken) {
          return (List) super.setAccessToken(accessToken);
        }
        @Override
        public List setAlt(java.lang.String alt) {
          return (List) super.setAlt(alt);
        }
        @Override
        public List setCallback(java.lang.String callback) {
          return (List) super.setCallback(callback);
        }
        @Override
        public List setFields(java.lang.String fields) {
          return (List) super.setFields(fields);
        }
        @Override
        public List setKey(java.lang.String key) {
          return (List) super.setKey(key);
        }
        @Override
        public List setOauthToken(java.lang.String oauthToken) {
          return (List) super.setOauthToken(oauthToken);
        }
        @Override
        public List setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (List) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public List setQuotaUser(java.lang.String quotaUser) {
          return (List) super.setQuotaUser(quotaUser);
        }
        @Override
        public List setUploadType(java.lang.String uploadType) {
          return (List) super.setUploadType(uploadType);
        }
        @Override
        public List setUploadProtocol(java.lang.String uploadProtocol) {
          return (List) super.setUploadProtocol(uploadProtocol);
        }
        /** The ID of the advertiser that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** The ID of the advertiser that owns the parent channel.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** The ID of the advertiser that owns the parent channel. */
        public List setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /** Required. The ID of the parent channel to which the requested sites belong. */
        @com.google.api.client.util.Key
        private java.lang.Long channelId;
        /** Required. The ID of the parent channel to which the requested sites belong.
         */
        public java.lang.Long getChannelId() {
          return channelId;
        }
        /** Required. The ID of the parent channel to which the requested sites belong. */
        public List setChannelId(java.lang.Long channelId) {
          this.channelId = channelId;
          return this;
        }
        /**
         * Allows filtering by site fields. Supported syntax: * Filter expressions for site
         * currently can only contain at most one * restriction. * A restriction has the form of
         * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported
         * fields: - `urlOrAppId` Examples: * All sites for which the URL or app ID contains
         * "google": `urlOrAppId : "google"`
         */
        @com.google.api.client.util.Key
        private java.lang.String filter;
        /** Allows filtering by site fields. Supported syntax: * Filter expressions for site currently can only
       contain at most one * restriction. * A restriction has the form of `{field} {operator} {value}`. *
       The operator must be `CONTAINS (:)`. * Supported fields: - `urlOrAppId` Examples: * All sites for
       which the URL or app ID contains "google": `urlOrAppId : "google"`
         */
        public java.lang.String getFilter() {
          return filter;
        }
        /**
         * Allows filtering by site fields. Supported syntax: * Filter expressions for site
         * currently can only contain at most one * restriction. * A restriction has the form of
         * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported
         * fields: - `urlOrAppId` Examples: * All sites for which the URL or app ID contains
         * "google": `urlOrAppId : "google"`
         */
        public List setFilter(java.lang.String filter) {
          this.filter = filter;
          return this;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The
         * default sorting order is ascending. To specify descending order for a field, a suffix "
         * desc" should be added to the field name. Example: `urlOrAppId desc`.
         */
        @com.google.api.client.util.Key
        private java.lang.String orderBy;
        /** Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The default
       sorting order is ascending. To specify descending order for a field, a suffix " desc" should be
       added to the field name. Example: `urlOrAppId desc`.
         */
        public java.lang.String getOrderBy() {
          return orderBy;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The
         * default sorting order is ascending. To specify descending order for a field, a suffix "
         * desc" should be added to the field name. Example: `urlOrAppId desc`.
         */
        public List setOrderBy(java.lang.String orderBy) {
          this.orderBy = orderBy;
          return this;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to
         * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        @com.google.api.client.util.Key
        private java.lang.Integer pageSize;
        /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
       error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        public java.lang.Integer getPageSize() {
          return pageSize;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to
         * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        public List setPageSize(java.lang.Integer pageSize) {
          this.pageSize = pageSize;
          return this;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListSites` method. If not
         * specified, the first page of results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;
        /** A token identifying a page of results the server should return. Typically, this is the value of
       next_page_token returned from the previous call to `ListSites` method. If not specified, the first
       page of results will be returned.
         */
        public java.lang.String getPageToken() {
          return pageToken;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListSites` method. If not
         * specified, the first page of results will be returned.
         */
        public List setPageToken(java.lang.String pageToken) {
          this.pageToken = pageToken;
          return this;
        }
        /** The ID of the partner that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long partnerId;
        /** The ID of the partner that owns the parent channel.
         */
        public java.lang.Long getPartnerId() {
          return partnerId;
        }
        /** The ID of the partner that owns the parent channel. */
        public List setPartnerId(java.lang.Long partnerId) {
          this.partnerId = partnerId;
          return this;
        }
        @Override
        public List set(String parameterName, Object value) {
          return (List) super.set(parameterName, value);
        }
      }
}
}
    /**
     * An accessor for creating requests from the Creatives collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
     *   {@code DisplayVideo.Creatives.List request = displayvideo.creatives().list(parameters ...)}
     * </pre>
     *
     * @return the resource collection
     */
    public Creatives creatives() {
      // A new accessor instance is created on every call.
      return new Creatives();
    }
/**
* The "creatives" collection of methods.
*/
public class Creatives {
/**
* Creates a new creative. Returns the newly created creative if successful.
*
* Create a request for the method "creatives.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Output only. The unique ID of the advertiser the creative belongs to.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Creative}
* @return the request
*/
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Creative content) throws java.io.IOException {
Create result = new Create(advertiserId, content);
initialize(result);
return result;
}
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Creative> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/creatives";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Creates a new creative. Returns the newly created creative if successful.
         *
         * Create a request for the method "creatives.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Output only. The unique ID of the advertiser the creative belongs to.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.Creative}
         * @since 1.13
         */
        protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.Creative content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Creative.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        }
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }
        /** Output only. The unique ID of the advertiser the creative belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Output only. The unique ID of the advertiser the creative belongs to.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Output only. The unique ID of the advertiser the creative belongs to. */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Deletes a creative. Returns error code `NOT_FOUND` if the creative does not exist. The creative
* should be archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`, before it can be
* deleted.
*
* Create a request for the method "creatives.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser this creative belongs to.
* @param creativeId The ID of the creative to be deleted.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long creativeId) throws java.io.IOException {
Delete result = new Delete(advertiserId, creativeId);
initialize(result);
return result;
}
      public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/creatives/{+creativeId}";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        private final java.util.regex.Pattern CREATIVE_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Deletes a creative. Returns error code `NOT_FOUND` if the creative does not exist. The creative
         * should be archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`, before it can be
         * deleted.
         *
         * Create a request for the method "creatives.delete".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
         * <p> {@link
         * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId The ID of the advertiser this creative belongs to.
         * @param creativeId The ID of the creative to be deleted.
         * @since 1.13
         */
        protected Delete(java.lang.Long advertiserId, java.lang.Long creativeId) {
          super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.creativeId = com.google.api.client.util.Preconditions.checkNotNull(creativeId, "Required parameter creativeId must be specified.");
        }
        @Override
        public Delete set$Xgafv(java.lang.String $Xgafv) {
          return (Delete) super.set$Xgafv($Xgafv);
        }
        @Override
        public Delete setAccessToken(java.lang.String accessToken) {
          return (Delete) super.setAccessToken(accessToken);
        }
        @Override
        public Delete setAlt(java.lang.String alt) {
          return (Delete) super.setAlt(alt);
        }
        @Override
        public Delete setCallback(java.lang.String callback) {
          return (Delete) super.setCallback(callback);
        }
        @Override
        public Delete setFields(java.lang.String fields) {
          return (Delete) super.setFields(fields);
        }
        @Override
        public Delete setKey(java.lang.String key) {
          return (Delete) super.setKey(key);
        }
        @Override
        public Delete setOauthToken(java.lang.String oauthToken) {
          return (Delete) super.setOauthToken(oauthToken);
        }
        @Override
        public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Delete) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Delete setQuotaUser(java.lang.String quotaUser) {
          return (Delete) super.setQuotaUser(quotaUser);
        }
        @Override
        public Delete setUploadType(java.lang.String uploadType) {
          return (Delete) super.setUploadType(uploadType);
        }
        @Override
        public Delete setUploadProtocol(java.lang.String uploadProtocol) {
          return (Delete) super.setUploadProtocol(uploadProtocol);
        }
        /** The ID of the advertiser this creative belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** The ID of the advertiser this creative belongs to.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** The ID of the advertiser this creative belongs to. */
        public Delete setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /** The ID of the creative to be deleted. */
        @com.google.api.client.util.Key
        private java.lang.Long creativeId;
        /** The ID of the creative to be deleted.
         */
        public java.lang.Long getCreativeId() {
          return creativeId;
        }
        /** The ID of the creative to be deleted. */
        public Delete setCreativeId(java.lang.Long creativeId) {
          this.creativeId = creativeId;
          return this;
        }
        @Override
        public Delete set(String parameterName, Object value) {
          return (Delete) super.set(parameterName, value);
        }
      }
/**
* Gets a creative.
*
* Create a request for the method "creatives.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser this creative belongs to.
* @param creativeId Required. The ID of the creative to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long creativeId) throws java.io.IOException {
Get result = new Get(advertiserId, creativeId);
initialize(result);
return result;
}
      public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Creative> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/creatives/{+creativeId}";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        private final java.util.regex.Pattern CREATIVE_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Gets a creative.
         *
         * Create a request for the method "creatives.get".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
         * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the advertiser this creative belongs to.
         * @param creativeId Required. The ID of the creative to fetch.
         * @since 1.13
         */
        protected Get(java.lang.Long advertiserId, java.lang.Long creativeId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Creative.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.creativeId = com.google.api.client.util.Preconditions.checkNotNull(creativeId, "Required parameter creativeId must be specified.");
        }
        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }
        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }
        @Override
        public Get set$Xgafv(java.lang.String $Xgafv) {
          return (Get) super.set$Xgafv($Xgafv);
        }
        @Override
        public Get setAccessToken(java.lang.String accessToken) {
          return (Get) super.setAccessToken(accessToken);
        }
        @Override
        public Get setAlt(java.lang.String alt) {
          return (Get) super.setAlt(alt);
        }
        @Override
        public Get setCallback(java.lang.String callback) {
          return (Get) super.setCallback(callback);
        }
        @Override
        public Get setFields(java.lang.String fields) {
          return (Get) super.setFields(fields);
        }
        @Override
        public Get setKey(java.lang.String key) {
          return (Get) super.setKey(key);
        }
        @Override
        public Get setOauthToken(java.lang.String oauthToken) {
          return (Get) super.setOauthToken(oauthToken);
        }
        @Override
        public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Get) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Get setQuotaUser(java.lang.String quotaUser) {
          return (Get) super.setQuotaUser(quotaUser);
        }
        @Override
        public Get setUploadType(java.lang.String uploadType) {
          return (Get) super.setUploadType(uploadType);
        }
        @Override
        public Get setUploadProtocol(java.lang.String uploadProtocol) {
          return (Get) super.setUploadProtocol(uploadProtocol);
        }
        /** Required. The ID of the advertiser this creative belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the advertiser this creative belongs to.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the advertiser this creative belongs to. */
        public Get setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /** Required. The ID of the creative to fetch. */
        @com.google.api.client.util.Key
        private java.lang.Long creativeId;
        /** Required. The ID of the creative to fetch.
         */
        public java.lang.Long getCreativeId() {
          return creativeId;
        }
        /** Required. The ID of the creative to fetch. */
        public Get setCreativeId(java.lang.Long creativeId) {
          this.creativeId = creativeId;
          return this;
        }
        @Override
        public Get set(String parameterName, Object value) {
          return (Get) super.set(parameterName, value);
        }
      }
/**
* Lists creatives in an advertiser. The order is defined by the order_by parameter. If a filter by
* entity_status is not specified, creatives with `ENTITY_STATUS_ARCHIVED` will not be included in
* the results.
*
* Create a request for the method "creatives.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser to list creatives for.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListCreativesResponse> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/creatives";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Lists creatives in an advertiser. The order is defined by the order_by parameter. If a filter
         * by entity_status is not specified, creatives with `ENTITY_STATUS_ARCHIVED` will not be included
         * in the results.
         *
         * Create a request for the method "creatives.list".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
         * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the advertiser to list creatives for.
         * @since 1.13
         */
        protected List(java.lang.Long advertiserId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListCreativesResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        }
        // Delegating overrides for the HEAD-request variants of this GET request.
        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }
        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }
        // Standard Google API query-parameter setters, overridden only to narrow the
        // return type to List so calls can be chained fluently.
        @Override
        public List set$Xgafv(java.lang.String $Xgafv) {
          return (List) super.set$Xgafv($Xgafv);
        }
        @Override
        public List setAccessToken(java.lang.String accessToken) {
          return (List) super.setAccessToken(accessToken);
        }
        @Override
        public List setAlt(java.lang.String alt) {
          return (List) super.setAlt(alt);
        }
        @Override
        public List setCallback(java.lang.String callback) {
          return (List) super.setCallback(callback);
        }
        @Override
        public List setFields(java.lang.String fields) {
          return (List) super.setFields(fields);
        }
        @Override
        public List setKey(java.lang.String key) {
          return (List) super.setKey(key);
        }
        @Override
        public List setOauthToken(java.lang.String oauthToken) {
          return (List) super.setOauthToken(oauthToken);
        }
        @Override
        public List setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (List) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public List setQuotaUser(java.lang.String quotaUser) {
          return (List) super.setQuotaUser(quotaUser);
        }
        @Override
        public List setUploadType(java.lang.String uploadType) {
          return (List) super.setUploadType(uploadType);
        }
        @Override
        public List setUploadProtocol(java.lang.String uploadProtocol) {
          return (List) super.setUploadProtocol(uploadProtocol);
        }
        // Required path parameter; the constructor enforces non-null via checkNotNull.
        /** Required. The ID of the advertiser to list creatives for. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the advertiser to list creatives for.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the advertiser to list creatives for. */
        public List setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
/**
* Allows filtering by creative properties. Supported syntax: * Filter expressions are made
* up of one or more restrictions. * Restriction for the same field must be combined by
* `OR`. * Restriction for different fields must be combined by `AND`. * Between `(` and `)`
* there can only be restrictions combined by `OR` for the same field. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)` for the
* following fields: - `entityStatus` - `creativeType`. - `dimensions` - `minDuration` -
* `maxDuration` - `approvalStatus` - `exchangeReviewStatus` - `dynamic` - `creativeId` *
* The operator must be `HAS (:)` for the following fields: - `lineItemIds` * For
* `entityStatus`, `minDuration`, `maxDuration`, and `dynamic` there may be at most one
* restriction. * For `dimensions`, the value is in the form of `"{width}x{height}"`. * For
* `exchangeReviewStatus`, the value is in the form of `{exchange}-{reviewStatus}`. * For
* `minDuration` and `maxDuration`, the value is in the form of `"{duration}s"`. Only
* seconds are supported with millisecond granularity. * There may be multiple `lineItemIds`
* restrictions in order to search against multiple possible line item IDs. * There may be
* multiple `creativeId` restrictions in order to search against multiple possible creative
* IDs. Examples: * All native creatives: `creativeType="CREATIVE_TYPE_NATIVE"` * All active
* creatives with 300x400 or 50x100 dimensions: `entityStatus="ENTITY_STATUS_ACTIVE" AND
* (dimensions="300x400" OR dimensions="50x100")` * All dynamic creatives that are approved
* by AdX or AppNexus, with a minimum duration of 5 seconds and 200ms. `dynamic="true" AND
* minDuration="5.2s" AND (exchangeReviewStatus="EXCHANGE_GOOGLE_AD_MANAGER-
* REVIEW_STATUS_APPROVED" OR exchangeReviewStatus="EXCHANGE_APPNEXUS-
* REVIEW_STATUS_APPROVED")` * All video creatives that are associated with line item ID 1
* or 2: `creativeType="CREATIVE_TYPE_VIDEO" AND (lineItemIds:1 OR lineItemIds:2)` * Find
* creatives by multiple creative IDs: `creativeId=1 OR creativeId=2` The length of this
* field should be no more than 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by creative properties. Supported syntax: * Filter expressions are made up of one
or more restrictions. * Restriction for the same field must be combined by `OR`. * Restriction for
different fields must be combined by `AND`. * Between `(` and `)` there can only be restrictions
combined by `OR` for the same field. * A restriction has the form of `{field} {operator} {value}`.
* The operator must be `EQUALS (=)` for the following fields: - `entityStatus` - `creativeType`. -
`dimensions` - `minDuration` - `maxDuration` - `approvalStatus` - `exchangeReviewStatus` -
`dynamic` - `creativeId` * The operator must be `HAS (:)` for the following fields: - `lineItemIds`
* For `entityStatus`, `minDuration`, `maxDuration`, and `dynamic` there may be at most one
restriction. * For `dimensions`, the value is in the form of `"{width}x{height}"`. * For
`exchangeReviewStatus`, the value is in the form of `{exchange}-{reviewStatus}`. * For
`minDuration` and `maxDuration`, the value is in the form of `"{duration}s"`. Only seconds are
supported with millisecond granularity. * There may be multiple `lineItemIds` restrictions in order
to search against multiple possible line item IDs. * There may be multiple `creativeId`
restrictions in order to search against multiple possible creative IDs. Examples: * All native
creatives: `creativeType="CREATIVE_TYPE_NATIVE"` * All active creatives with 300x400 or 50x100
dimensions: `entityStatus="ENTITY_STATUS_ACTIVE" AND (dimensions="300x400" OR dimensions="50x100")`
* All dynamic creatives that are approved by AdX or AppNexus, with a minimum duration of 5 seconds
and 200ms. `dynamic="true" AND minDuration="5.2s" AND (exchangeReviewStatus
="EXCHANGE_GOOGLE_AD_MANAGER-REVIEW_STATUS_APPROVED" OR exchangeReviewStatus="EXCHANGE_APPNEXUS-
REVIEW_STATUS_APPROVED")` * All video creatives that are associated with line item ID 1 or 2:
`creativeType="CREATIVE_TYPE_VIDEO" AND (lineItemIds:1 OR lineItemIds:2)` * Find creatives by
multiple creative IDs: `creativeId=1 OR creativeId=2` The length of this field should be no more
than 500 characters.
*/
public java.lang.String getFilter() {
  // Expose the currently configured creative filter expression; null when unset.
  return this.filter;
}
/**
* Allows filtering by creative properties. Supported syntax: * Filter expressions are made
* up of one or more restrictions. * Restriction for the same field must be combined by
* `OR`. * Restriction for different fields must be combined by `AND`. * Between `(` and `)`
* there can only be restrictions combined by `OR` for the same field. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)` for the
 * following fields: - `entityStatus` - `creativeType` - `dimensions` - `minDuration` -
* `maxDuration` - `approvalStatus` - `exchangeReviewStatus` - `dynamic` - `creativeId` *
* The operator must be `HAS (:)` for the following fields: - `lineItemIds` * For
* `entityStatus`, `minDuration`, `maxDuration`, and `dynamic` there may be at most one
* restriction. * For `dimensions`, the value is in the form of `"{width}x{height}"`. * For
* `exchangeReviewStatus`, the value is in the form of `{exchange}-{reviewStatus}`. * For
* `minDuration` and `maxDuration`, the value is in the form of `"{duration}s"`. Only
* seconds are supported with millisecond granularity. * There may be multiple `lineItemIds`
* restrictions in order to search against multiple possible line item IDs. * There may be
* multiple `creativeId` restrictions in order to search against multiple possible creative
* IDs. Examples: * All native creatives: `creativeType="CREATIVE_TYPE_NATIVE"` * All active
* creatives with 300x400 or 50x100 dimensions: `entityStatus="ENTITY_STATUS_ACTIVE" AND
* (dimensions="300x400" OR dimensions="50x100")` * All dynamic creatives that are approved
* by AdX or AppNexus, with a minimum duration of 5 seconds and 200ms. `dynamic="true" AND
* minDuration="5.2s" AND (exchangeReviewStatus="EXCHANGE_GOOGLE_AD_MANAGER-
* REVIEW_STATUS_APPROVED" OR exchangeReviewStatus="EXCHANGE_APPNEXUS-
* REVIEW_STATUS_APPROVED")` * All video creatives that are associated with line item ID 1
* or 2: `creativeType="CREATIVE_TYPE_VIDEO" AND (lineItemIds:1 OR lineItemIds:2)` * Find
* creatives by multiple creative IDs: `creativeId=1 OR creativeId=2` The length of this
* field should be no more than 500 characters.
*/
public List setFilter(java.lang.String filter) {
  // Record the creative filter expression, then return this request so calls can be chained.
  this.filter = filter;
  return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `creativeId` (default) *
* `createTime` * `mediaDuration` * `dimensions` (sorts by width first, then by height) The
* default sorting order is ascending. To specify descending order for a field, a suffix
* "desc" should be added to the field name. Example: `createTime desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `creativeId` (default) * `createTime` *
`mediaDuration` * `dimensions` (sorts by width first, then by height) The default sorting order is
ascending. To specify descending order for a field, a suffix "desc" should be added to the field
name. Example: `createTime desc`.
*/
public java.lang.String getOrderBy() {
  // Expose the currently configured sort field; null when unset.
  return this.orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `creativeId` (default) *
* `createTime` * `mediaDuration` * `dimensions` (sorts by width first, then by height) The
* default sorting order is ascending. To specify descending order for a field, a suffix
* "desc" should be added to the field name. Example: `createTime desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
  // Record the sort field (e.g. "createTime desc"), then return this request for chaining.
  this.orderBy = orderBy;
  return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public java.lang.Integer getPageSize() {
  // Expose the requested page size; null means the server default of 100.
  return this.pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public List setPageSize(java.lang.Integer pageSize) {
  // Record the requested page size (server-validated range 1..100), then return this request.
  this.pageSize = pageSize;
  return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListCreatives` method. If
* not specified, the first page of results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListCreatives` method. If not specified, the
first page of results will be returned.
*/
public java.lang.String getPageToken() {
  // Expose the pagination token; null requests the first page.
  return this.pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListCreatives` method. If
* not specified, the first page of results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
  // Record the next_page_token from a prior ListCreatives call, then return this request.
  this.pageToken = pageToken;
  return this;
}
/** Sets any query parameter by name, narrowing the result back to the fluent {@code List} type. */
@Override
public List set(String parameterName, Object value) {
  return (List) super.set(parameterName, value);
}
}
/**
* Updates an existing creative. Returns the updated creative if successful.
*
* Create a request for the method "creatives.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Output only. The unique ID of the advertiser the creative belongs to.
* @param creativeId Output only. The unique ID of the creative. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Creative}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long creativeId, com.google.api.services.displayvideo.v1.model.Creative content) throws java.io.IOException {
  // Build the creatives.patch request, run client-level initialization, and hand it back.
  final Patch request = new Patch(advertiserId, creativeId, content);
  initialize(request);
  return request;
}
public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Creative> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/creatives/{+creativeId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern CREATIVE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Updates an existing creative. Returns the updated creative if successful.
*
* Create a request for the method "creatives.patch".
*
 * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Output only. The unique ID of the advertiser the creative belongs to.
* @param creativeId Output only. The unique ID of the creative. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Creative}
* @since 1.13
*/
protected Patch(java.lang.Long advertiserId, java.lang.Long creativeId, com.google.api.services.displayvideo.v1.model.Creative content) {
super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Creative.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.creativeId = com.google.api.client.util.Preconditions.checkNotNull(creativeId, "Required parameter creativeId must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/** Output only. The unique ID of the advertiser the creative belongs to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Output only. The unique ID of the advertiser the creative belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Output only. The unique ID of the advertiser the creative belongs to. */
public Patch setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Output only. The unique ID of the creative. Assigned by the system. */
@com.google.api.client.util.Key
private java.lang.Long creativeId;
/** Output only. The unique ID of the creative. Assigned by the system.
*/
public java.lang.Long getCreativeId() {
return creativeId;
}
/** Output only. The unique ID of the creative. Assigned by the system. */
public Patch setCreativeId(java.lang.Long creativeId) {
this.creativeId = creativeId;
return this;
}
/** Required. The mask to control which fields to update. */
@com.google.api.client.util.Key
private String updateMask;
/** Required. The mask to control which fields to update.
*/
public String getUpdateMask() {
return updateMask;
}
/** Required. The mask to control which fields to update. */
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the InsertionOrders collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.InsertionOrders.List request = displayvideo.insertionOrders().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public InsertionOrders insertionOrders() {
  // A fresh accessor object is returned on every call; it carries no per-call state.
  final InsertionOrders collection = new InsertionOrders();
  return collection;
}
/**
* The "insertionOrders" collection of methods.
*/
public class InsertionOrders {
/**
* Creates a new insertion order. Returns the newly created insertion order if successful.
*
* Create a request for the method "insertionOrders.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Output only. The unique ID of the advertiser the insertion order belongs to.
* @param content the {@link com.google.api.services.displayvideo.v1.model.InsertionOrder}
* @return the request
*/
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.InsertionOrder content) throws java.io.IOException {
  // Build the insertionOrders.create request, run client-level initialization, and hand it back.
  final Create request = new Create(advertiserId, content);
  initialize(request);
  return request;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InsertionOrder> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/insertionOrders";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Creates a new insertion order. Returns the newly created insertion order if successful.
*
* Create a request for the method "insertionOrders.create".
*
 * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Output only. The unique ID of the advertiser the insertion order belongs to.
* @param content the {@link com.google.api.services.displayvideo.v1.model.InsertionOrder}
* @since 1.13
*/
protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.InsertionOrder content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.InsertionOrder.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/** Output only. The unique ID of the advertiser the insertion order belongs to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Output only. The unique ID of the advertiser the insertion order belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Output only. The unique ID of the advertiser the insertion order belongs to. */
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes an insertion order. Returns error code `NOT_FOUND` if the insertion order does not exist.
* The insertion order should be archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`,
* to be able to delete it.
*
* Create a request for the method "insertionOrders.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser this insertion order belongs to.
* @param insertionOrderId The ID of the insertion order we need to delete.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long insertionOrderId) throws java.io.IOException {
  // Build the insertionOrders.delete request, run client-level initialization, and hand it back.
  final Delete request = new Delete(advertiserId, insertionOrderId);
  initialize(request);
  return request;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/insertionOrders/{+insertionOrderId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern INSERTION_ORDER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Deletes an insertion order. Returns error code `NOT_FOUND` if the insertion order does not
* exist. The insertion order should be archived first, i.e. set entity_status to
* `ENTITY_STATUS_ARCHIVED`, to be able to delete it.
*
* Create a request for the method "insertionOrders.delete".
*
 * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId The ID of the advertiser this insertion order belongs to.
* @param insertionOrderId The ID of the insertion order we need to delete.
* @since 1.13
*/
protected Delete(java.lang.Long advertiserId, java.lang.Long insertionOrderId) {
super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.insertionOrderId = com.google.api.client.util.Preconditions.checkNotNull(insertionOrderId, "Required parameter insertionOrderId must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** The ID of the advertiser this insertion order belongs to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser this insertion order belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the advertiser this insertion order belongs to. */
public Delete setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** The ID of the insertion order we need to delete. */
@com.google.api.client.util.Key
private java.lang.Long insertionOrderId;
/** The ID of the insertion order we need to delete.
*/
public java.lang.Long getInsertionOrderId() {
return insertionOrderId;
}
/** The ID of the insertion order we need to delete. */
public Delete setInsertionOrderId(java.lang.Long insertionOrderId) {
this.insertionOrderId = insertionOrderId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets an insertion order. Returns error code `NOT_FOUND` if the insertion order does not exist.
*
* Create a request for the method "insertionOrders.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser this insertion order belongs to.
* @param insertionOrderId Required. The ID of the insertion order to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long insertionOrderId) throws java.io.IOException {
  // Build the insertionOrders.get request, run client-level initialization, and hand it back.
  final Get request = new Get(advertiserId, insertionOrderId);
  initialize(request);
  return request;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InsertionOrder> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/insertionOrders/{+insertionOrderId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern INSERTION_ORDER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets an insertion order. Returns error code `NOT_FOUND` if the insertion order does not exist.
*
* Create a request for the method "insertionOrders.get".
*
 * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the advertiser this insertion order belongs to.
* @param insertionOrderId Required. The ID of the insertion order to fetch.
* @since 1.13
*/
protected Get(java.lang.Long advertiserId, java.lang.Long insertionOrderId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.InsertionOrder.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.insertionOrderId = com.google.api.client.util.Preconditions.checkNotNull(insertionOrderId, "Required parameter insertionOrderId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser this insertion order belongs to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser this insertion order belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser this insertion order belongs to. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the insertion order to fetch. */
@com.google.api.client.util.Key
private java.lang.Long insertionOrderId;
/** Required. The ID of the insertion order to fetch.
*/
public java.lang.Long getInsertionOrderId() {
return insertionOrderId;
}
/** Required. The ID of the insertion order to fetch. */
public Get setInsertionOrderId(java.lang.Long insertionOrderId) {
this.insertionOrderId = insertionOrderId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists insertion orders in an advertiser. The order is defined by the order_by parameter. If a
* filter by entity_status is not specified, insertion orders with `ENTITY_STATUS_ARCHIVED` will not
* be included in the results.
*
* Create a request for the method "insertionOrders.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser to list insertion orders for.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
  // Build the insertionOrders.list request, run client-level initialization, and hand it back.
  final List request = new List(advertiserId);
  initialize(request);
  return request;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListInsertionOrdersResponse> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/insertionOrders";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists insertion orders in an advertiser. The order is defined by the order_by parameter. If a
* filter by entity_status is not specified, insertion orders with `ENTITY_STATUS_ARCHIVED` will
* not be included in the results.
*
* Create a request for the method "insertionOrders.list".
*
 * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the advertiser to list insertion orders for.
* @since 1.13
*/
protected List(java.lang.Long advertiserId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListInsertionOrdersResponse.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser to list insertion orders for. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser to list insertion orders for.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser to list insertion orders for. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Allows filtering by insertion order properties. Supported syntax: * Filter expressions
* are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
* logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
* the form of `{field} {operator} {value}`. * The operator used on
* `budget.budget_segments.date_range.end_date` must be LESS THAN (<). * The operator used
* on `updateTime` must be `GREATER THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`.
* * The operators used on all other fields must be `EQUALS (=)`. * Supported fields: -
* `campaignId` - `displayName` - `entityStatus` -
* `budget.budget_segments.date_range.end_date` (input as YYYY-MM-DD) - `updateTime` (input
* in ISO 8601 format, or YYYY-MM-DDTHH:MM:SSZ) Examples: * All insertion orders under a
* campaign: `campaignId="1234"` * All `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`
* insertion orders under an advertiser: `(entityStatus="ENTITY_STATUS_ACTIVE" OR
* entityStatus="ENTITY_STATUS_PAUSED")` * All insertion orders whose budget segments' dates
* end before March 28, 2019: `budget.budget_segments.date_range.end_date<"2019-03-28"` *
* All insertion orders with an update time less than or equal to `2020-11-04T18:54:47Z
* (format of ISO 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All insertion orders with
* an update time greater than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
* `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
* characters.
*/
        @com.google.api.client.util.Key
        private java.lang.String filter;
        /**
         * Allows filtering by insertion order properties. Supported syntax: * Filter expressions are
         * made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
         * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of
         * `{field} {operator} {value}`. See {@link #setFilter(java.lang.String)} for the supported
         * fields, operators and examples.
         *
         * @return the current filter expression, or {@code null} if none has been set
         */
        public java.lang.String getFilter() {
          return filter;
        }
        /**
         * Allows filtering by insertion order properties. Supported syntax: * Filter expressions
         * are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
         * logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
         * the form of `{field} {operator} {value}`. * The operator used on
         * `budget.budget_segments.date_range.end_date` must be LESS THAN (<). * The operator used
         * on `updateTime` must be `GREATER THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`.
         * * The operators used on all other fields must be `EQUALS (=)`. * Supported fields: -
         * `campaignId` - `displayName` - `entityStatus` -
         * `budget.budget_segments.date_range.end_date` (input as YYYY-MM-DD) - `updateTime` (input
         * in ISO 8601 format, or YYYY-MM-DDTHH:MM:SSZ) Examples: * All insertion orders under a
         * campaign: `campaignId="1234"` * All `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`
         * insertion orders under an advertiser: `(entityStatus="ENTITY_STATUS_ACTIVE" OR
         * entityStatus="ENTITY_STATUS_PAUSED")` * All insertion orders whose budget segments' dates
         * end before March 28, 2019: `budget.budget_segments.date_range.end_date<"2019-03-28"` *
         * All insertion orders with an update time less than or equal to `2020-11-04T18:54:47Z
         * (format of ISO 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All insertion orders with
         * an update time greater than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
         * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
         * characters.
         *
         * @param filter the filter expression to apply (at most 500 characters)
         * @return this request, for call chaining
         */
        public List setFilter(java.lang.String filter) {
          this.filter = filter;
          return this;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * "displayName" (default) *
         * "entityStatus" * "updateTime" The default sorting order is ascending. To specify
         * descending order for a field, a suffix "desc" should be added to the field name. Example:
         * `displayName desc`.
         */
        @com.google.api.client.util.Key
        private java.lang.String orderBy;
        /**
         * Field by which to sort the list. See {@link #setOrderBy(java.lang.String)} for the
         * accepted values.
         *
         * @return the current sort field, or {@code null} if none has been set
         */
        public java.lang.String getOrderBy() {
          return orderBy;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * "displayName" (default) *
         * "entityStatus" * "updateTime" The default sorting order is ascending. To specify
         * descending order for a field, a suffix "desc" should be added to the field name. Example:
         * `displayName desc`.
         *
         * @param orderBy the field to sort by, optionally suffixed with " desc"
         * @return this request, for call chaining
         */
        public List setOrderBy(java.lang.String orderBy) {
          this.orderBy = orderBy;
          return this;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
         * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        @com.google.api.client.util.Key
        private java.lang.Integer pageSize;
        /**
         * Requested page size. See {@link #setPageSize(java.lang.Integer)} for the accepted range.
         *
         * @return the requested page size, or {@code null} if the server default applies
         */
        public java.lang.Integer getPageSize() {
          return pageSize;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
         * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         *
         * @param pageSize the page size, between 1 and 100
         * @return this request, for call chaining
         */
        public List setPageSize(java.lang.Integer pageSize) {
          this.pageSize = pageSize;
          return this;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListInsertionOrders` method.
         * If not specified, the first page of results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;
        /**
         * A token identifying a page of results the server should return. See
         * {@link #setPageToken(java.lang.String)}.
         *
         * @return the current page token, or {@code null} for the first page
         */
        public java.lang.String getPageToken() {
          return pageToken;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListInsertionOrders` method.
         * If not specified, the first page of results will be returned.
         *
         * @param pageToken the page token from a previous `ListInsertionOrders` response
         * @return this request, for call chaining
         */
        public List setPageToken(java.lang.String pageToken) {
          this.pageToken = pageToken;
          return this;
        }
        /** Sets an arbitrary query parameter {@code parameterName} to {@code value}. */
        @Override
        public List set(String parameterName, Object value) {
          return (List) super.set(parameterName, value);
        }
}
      /**
       * Updates an existing insertion order. Returns the updated insertion order if successful.
       *
       * Create a request for the method "insertionOrders.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any optional
       * parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       *
       * @param advertiserId Output only. The unique ID of the advertiser the insertion order belongs to.
       * @param insertionOrderId Output only. The unique ID of the insertion order. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.InsertionOrder}
       * @return the initialized request, ready for optional parameters and {@code execute()}
       * @throws java.io.IOException if initializing the request fails
       */
      public Patch patch(java.lang.Long advertiserId, java.lang.Long insertionOrderId, com.google.api.services.displayvideo.v1.model.InsertionOrder content) throws java.io.IOException {
        Patch result = new Patch(advertiserId, insertionOrderId, content);
        initialize(result);
        return result;
      }
      public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InsertionOrder> {

        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/insertionOrders/{+insertionOrderId}";

        // Path-parameter validation patterns for REST_PATH (any non-empty segment without '/').
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        private final java.util.regex.Pattern INSERTION_ORDER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        /**
         * Updates an existing insertion order. Returns the updated insertion order if successful.
         *
         * Create a request for the method "insertionOrders.patch".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
         * <p> {@link
         * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Output only. The unique ID of the advertiser the insertion order belongs to.
         * @param insertionOrderId Output only. The unique ID of the insertion order. Assigned by the system.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.InsertionOrder}
         * @since 1.13
         */
        protected Patch(java.lang.Long advertiserId, java.lang.Long insertionOrderId, com.google.api.services.displayvideo.v1.model.InsertionOrder content) {
          super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.InsertionOrder.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.insertionOrderId = com.google.api.client.util.Preconditions.checkNotNull(insertionOrderId, "Required parameter insertionOrderId must be specified.");
        }

        @Override
        public Patch set$Xgafv(java.lang.String $Xgafv) {
          return (Patch) super.set$Xgafv($Xgafv);
        }

        @Override
        public Patch setAccessToken(java.lang.String accessToken) {
          return (Patch) super.setAccessToken(accessToken);
        }

        @Override
        public Patch setAlt(java.lang.String alt) {
          return (Patch) super.setAlt(alt);
        }

        @Override
        public Patch setCallback(java.lang.String callback) {
          return (Patch) super.setCallback(callback);
        }

        @Override
        public Patch setFields(java.lang.String fields) {
          return (Patch) super.setFields(fields);
        }

        @Override
        public Patch setKey(java.lang.String key) {
          return (Patch) super.setKey(key);
        }

        @Override
        public Patch setOauthToken(java.lang.String oauthToken) {
          return (Patch) super.setOauthToken(oauthToken);
        }

        @Override
        public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Patch) super.setPrettyPrint(prettyPrint);
        }

        @Override
        public Patch setQuotaUser(java.lang.String quotaUser) {
          return (Patch) super.setQuotaUser(quotaUser);
        }

        @Override
        public Patch setUploadType(java.lang.String uploadType) {
          return (Patch) super.setUploadType(uploadType);
        }

        @Override
        public Patch setUploadProtocol(java.lang.String uploadProtocol) {
          return (Patch) super.setUploadProtocol(uploadProtocol);
        }

        /** Output only. The unique ID of the advertiser the insertion order belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;

        /**
         * Output only. The unique ID of the advertiser the insertion order belongs to.
         *
         * @return the advertiser ID
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }

        /**
         * Output only. The unique ID of the advertiser the insertion order belongs to.
         *
         * @param advertiserId the advertiser ID
         * @return this request, for call chaining
         */
        public Patch setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }

        /** Output only. The unique ID of the insertion order. Assigned by the system. */
        @com.google.api.client.util.Key
        private java.lang.Long insertionOrderId;

        /**
         * Output only. The unique ID of the insertion order. Assigned by the system.
         *
         * @return the insertion order ID
         */
        public java.lang.Long getInsertionOrderId() {
          return insertionOrderId;
        }

        /**
         * Output only. The unique ID of the insertion order. Assigned by the system.
         *
         * @param insertionOrderId the insertion order ID
         * @return this request, for call chaining
         */
        public Patch setInsertionOrderId(java.lang.Long insertionOrderId) {
          this.insertionOrderId = insertionOrderId;
          return this;
        }

        /** Required. The mask to control which fields to update. */
        @com.google.api.client.util.Key
        private String updateMask;

        /**
         * Required. The mask to control which fields to update.
         *
         * @return the update mask, or {@code null} if none has been set
         */
        public String getUpdateMask() {
          return updateMask;
        }

        /**
         * Required. The mask to control which fields to update.
         *
         * @param updateMask the field mask listing the fields to update
         * @return this request, for call chaining
         */
        public Patch setUpdateMask(String updateMask) {
          this.updateMask = updateMask;
          return this;
        }

        /** Sets an arbitrary query parameter {@code parameterName} to {@code value}. */
        @Override
        public Patch set(String parameterName, Object value) {
          return (Patch) super.set(parameterName, value);
        }
      }
}
    /**
     * An accessor for creating requests from the LineItems collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
     *   {@code DisplayVideo.LineItems.List request = displayvideo.lineItems().list(parameters ...)}
     * </pre>
     *
     * <p>Note: a new {@code LineItems} accessor instance is created on each call.</p>
     *
     * @return the resource collection
     */
    public LineItems lineItems() {
      return new LineItems();
    }
/**
* The "lineItems" collection of methods.
*/
public class LineItems {
      /**
       * Bulk edits targeting options under a single line item. The operation will delete the assigned
       * targeting options provided in BulkEditLineItemAssignedTargetingOptionsRequest.delete_requests and
       * then create the assigned targeting options provided in
       * BulkEditLineItemAssignedTargetingOptionsRequest.create_requests .
       *
       * Create a request for the method "lineItems.bulkEditLineItemAssignedTargetingOptions".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any optional
       * parameters, call the {@link BulkEditLineItemAssignedTargetingOptions#execute()} method to invoke
       * the remote operation.
       *
       * @param advertiserId Required. The ID of the advertiser the line item belongs to.
       * @param lineItemId Required. The ID of the line item the assigned targeting option will belong to.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsRequest}
       * @return the initialized request, ready for optional parameters and {@code execute()}
       * @throws java.io.IOException if initializing the request fails
       */
      public BulkEditLineItemAssignedTargetingOptions bulkEditLineItemAssignedTargetingOptions(java.lang.Long advertiserId, java.lang.Long lineItemId, com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsRequest content) throws java.io.IOException {
        BulkEditLineItemAssignedTargetingOptions result = new BulkEditLineItemAssignedTargetingOptions(advertiserId, lineItemId, content);
        initialize(result);
        return result;
      }
      public class BulkEditLineItemAssignedTargetingOptions extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsResponse> {

        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}:bulkEditLineItemAssignedTargetingOptions";

        // Path-parameter validation patterns for REST_PATH (any non-empty segment without '/').
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        /**
         * Bulk edits targeting options under a single line item. The operation will delete the assigned
         * targeting options provided in BulkEditLineItemAssignedTargetingOptionsRequest.delete_requests
         * and then create the assigned targeting options provided in
         * BulkEditLineItemAssignedTargetingOptionsRequest.create_requests .
         *
         * Create a request for the method "lineItems.bulkEditLineItemAssignedTargetingOptions".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link BulkEditLineItemAssignedTargetingOptions#execute()} method
         * to invoke the remote operation. <p> {@link BulkEditLineItemAssignedTargetingOptions#initialize(
         * com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be called to
         * initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the advertiser the line item belongs to.
         * @param lineItemId Required. The ID of the line item the assigned targeting option will belong to.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsRequest}
         * @since 1.13
         */
        protected BulkEditLineItemAssignedTargetingOptions(java.lang.Long advertiserId, java.lang.Long lineItemId, com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsRequest content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditLineItemAssignedTargetingOptionsResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions set$Xgafv(java.lang.String $Xgafv) {
          return (BulkEditLineItemAssignedTargetingOptions) super.set$Xgafv($Xgafv);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setAccessToken(java.lang.String accessToken) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setAccessToken(accessToken);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setAlt(java.lang.String alt) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setAlt(alt);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setCallback(java.lang.String callback) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setCallback(callback);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setFields(java.lang.String fields) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setFields(fields);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setKey(java.lang.String key) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setKey(key);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setOauthToken(java.lang.String oauthToken) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setOauthToken(oauthToken);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setPrettyPrint(prettyPrint);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setQuotaUser(java.lang.String quotaUser) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setQuotaUser(quotaUser);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setUploadType(java.lang.String uploadType) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setUploadType(uploadType);
        }

        @Override
        public BulkEditLineItemAssignedTargetingOptions setUploadProtocol(java.lang.String uploadProtocol) {
          return (BulkEditLineItemAssignedTargetingOptions) super.setUploadProtocol(uploadProtocol);
        }

        /** Required. The ID of the advertiser the line item belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;

        /**
         * Required. The ID of the advertiser the line item belongs to.
         *
         * @return the advertiser ID
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }

        /**
         * Required. The ID of the advertiser the line item belongs to.
         *
         * @param advertiserId the advertiser ID
         * @return this request, for call chaining
         */
        public BulkEditLineItemAssignedTargetingOptions setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }

        /** Required. The ID of the line item the assigned targeting option will belong to. */
        @com.google.api.client.util.Key
        private java.lang.Long lineItemId;

        /**
         * Required. The ID of the line item the assigned targeting option will belong to.
         *
         * @return the line item ID
         */
        public java.lang.Long getLineItemId() {
          return lineItemId;
        }

        /**
         * Required. The ID of the line item the assigned targeting option will belong to.
         *
         * @param lineItemId the line item ID
         * @return this request, for call chaining
         */
        public BulkEditLineItemAssignedTargetingOptions setLineItemId(java.lang.Long lineItemId) {
          this.lineItemId = lineItemId;
          return this;
        }

        /** Sets an arbitrary query parameter {@code parameterName} to {@code value}. */
        @Override
        public BulkEditLineItemAssignedTargetingOptions set(String parameterName, Object value) {
          return (BulkEditLineItemAssignedTargetingOptions) super.set(parameterName, value);
        }
      }
      /**
       * Lists assigned targeting options of a line item across targeting types.
       *
       * Create a request for the method "lineItems.bulkListLineItemAssignedTargetingOptions".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any optional
       * parameters, call the {@link BulkListLineItemAssignedTargetingOptions#execute()} method to invoke
       * the remote operation.
       *
       * @param advertiserId Required. The ID of the advertiser the line item belongs to.
       * @param lineItemId Required. The ID of the line item to list assigned targeting options for.
       * @return the initialized request, ready for optional parameters and {@code execute()}
       * @throws java.io.IOException if initializing the request fails
       */
      public BulkListLineItemAssignedTargetingOptions bulkListLineItemAssignedTargetingOptions(java.lang.Long advertiserId, java.lang.Long lineItemId) throws java.io.IOException {
        BulkListLineItemAssignedTargetingOptions result = new BulkListLineItemAssignedTargetingOptions(advertiserId, lineItemId);
        initialize(result);
        return result;
      }
      public class BulkListLineItemAssignedTargetingOptions extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkListLineItemAssignedTargetingOptionsResponse> {

        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}:bulkListLineItemAssignedTargetingOptions";

        // Path-parameter validation patterns for REST_PATH (any non-empty segment without '/').
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        /**
         * Lists assigned targeting options of a line item across targeting types.
         *
         * Create a request for the method "lineItems.bulkListLineItemAssignedTargetingOptions".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link BulkListLineItemAssignedTargetingOptions#execute()} method
         * to invoke the remote operation. <p> {@link BulkListLineItemAssignedTargetingOptions#initialize(
         * com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be called to
         * initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the advertiser the line item belongs to.
         * @param lineItemId Required. The ID of the line item to list assigned targeting options for.
         * @since 1.13
         */
        protected BulkListLineItemAssignedTargetingOptions(java.lang.Long advertiserId, java.lang.Long lineItemId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.BulkListLineItemAssignedTargetingOptionsResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
        }

        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }

        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions set$Xgafv(java.lang.String $Xgafv) {
          return (BulkListLineItemAssignedTargetingOptions) super.set$Xgafv($Xgafv);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setAccessToken(java.lang.String accessToken) {
          return (BulkListLineItemAssignedTargetingOptions) super.setAccessToken(accessToken);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setAlt(java.lang.String alt) {
          return (BulkListLineItemAssignedTargetingOptions) super.setAlt(alt);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setCallback(java.lang.String callback) {
          return (BulkListLineItemAssignedTargetingOptions) super.setCallback(callback);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setFields(java.lang.String fields) {
          return (BulkListLineItemAssignedTargetingOptions) super.setFields(fields);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setKey(java.lang.String key) {
          return (BulkListLineItemAssignedTargetingOptions) super.setKey(key);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setOauthToken(java.lang.String oauthToken) {
          return (BulkListLineItemAssignedTargetingOptions) super.setOauthToken(oauthToken);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (BulkListLineItemAssignedTargetingOptions) super.setPrettyPrint(prettyPrint);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setQuotaUser(java.lang.String quotaUser) {
          return (BulkListLineItemAssignedTargetingOptions) super.setQuotaUser(quotaUser);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setUploadType(java.lang.String uploadType) {
          return (BulkListLineItemAssignedTargetingOptions) super.setUploadType(uploadType);
        }

        @Override
        public BulkListLineItemAssignedTargetingOptions setUploadProtocol(java.lang.String uploadProtocol) {
          return (BulkListLineItemAssignedTargetingOptions) super.setUploadProtocol(uploadProtocol);
        }

        /** Required. The ID of the advertiser the line item belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;

        /**
         * Required. The ID of the advertiser the line item belongs to.
         *
         * @return the advertiser ID
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }

        /**
         * Required. The ID of the advertiser the line item belongs to.
         *
         * @param advertiserId the advertiser ID
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }

        /** Required. The ID of the line item to list assigned targeting options for. */
        @com.google.api.client.util.Key
        private java.lang.Long lineItemId;

        /**
         * Required. The ID of the line item to list assigned targeting options for.
         *
         * @return the line item ID
         */
        public java.lang.Long getLineItemId() {
          return lineItemId;
        }

        /**
         * Required. The ID of the line item to list assigned targeting options for.
         *
         * @param lineItemId the line item ID
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setLineItemId(java.lang.Long lineItemId) {
          this.lineItemId = lineItemId;
          return this;
        }

        /**
         * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
         * expressions are made up of one or more restrictions. * Restrictions can be combined by
         * the logical operator `OR` on the same field. * A restriction has the form of `{field}
         * {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
         * `targetingType` - `inheritance` Examples: * AssignedTargetingOptions of targeting type
         * TARGETING_TYPE_PROXIMITY_LOCATION_LIST or TARGETING_TYPE_CHANNEL
         * `targetingType="TARGETING_TYPE_PROXIMITY_LOCATION_LIST" OR
         * targetingType="TARGETING_TYPE_CHANNEL"` * AssignedTargetingOptions with inheritance
         * status of NOT_INHERITED or INHERITED_FROM_PARTNER `inheritance="NOT_INHERITED" OR
         * inheritance="INHERITED_FROM_PARTNER"` The length of this field should be no more than 500
         * characters.
         */
        @com.google.api.client.util.Key
        private java.lang.String filter;

        /**
         * Allows filtering by assigned targeting option properties. See
         * {@link #setFilter(java.lang.String)} for the supported fields, operators and examples.
         *
         * @return the current filter expression, or {@code null} if none has been set
         */
        public java.lang.String getFilter() {
          return filter;
        }

        /**
         * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
         * expressions are made up of one or more restrictions. * Restrictions can be combined by
         * the logical operator `OR` on the same field. * A restriction has the form of `{field}
         * {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
         * `targetingType` - `inheritance` Examples: * AssignedTargetingOptions of targeting type
         * TARGETING_TYPE_PROXIMITY_LOCATION_LIST or TARGETING_TYPE_CHANNEL
         * `targetingType="TARGETING_TYPE_PROXIMITY_LOCATION_LIST" OR
         * targetingType="TARGETING_TYPE_CHANNEL"` * AssignedTargetingOptions with inheritance
         * status of NOT_INHERITED or INHERITED_FROM_PARTNER `inheritance="NOT_INHERITED" OR
         * inheritance="INHERITED_FROM_PARTNER"` The length of this field should be no more than 500
         * characters.
         *
         * @param filter the filter expression to apply (at most 500 characters)
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setFilter(java.lang.String filter) {
          this.filter = filter;
          return this;
        }

        /**
         * Field by which to sort the list. Acceptable values are: * `targetingType` (default) The
         * default sorting order is ascending. To specify descending order for a field, a suffix
         * "desc" should be added to the field name. Example: `targetingType desc`.
         */
        @com.google.api.client.util.Key
        private java.lang.String orderBy;

        /**
         * Field by which to sort the list. See {@link #setOrderBy(java.lang.String)} for the
         * accepted values.
         *
         * @return the current sort field, or {@code null} if none has been set
         */
        public java.lang.String getOrderBy() {
          return orderBy;
        }

        /**
         * Field by which to sort the list. Acceptable values are: * `targetingType` (default) The
         * default sorting order is ascending. To specify descending order for a field, a suffix
         * "desc" should be added to the field name. Example: `targetingType desc`.
         *
         * @param orderBy the field to sort by, optionally suffixed with " desc"
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setOrderBy(java.lang.String orderBy) {
          this.orderBy = orderBy;
          return this;
        }

        /**
         * Requested page size. The size must be an integer between `1` and `5000`. If unspecified,
         * the default is '5000'. Returns error code `INVALID_ARGUMENT` if an invalid value is
         * specified.
         */
        @com.google.api.client.util.Key
        private java.lang.Integer pageSize;

        /**
         * Requested page size. See {@link #setPageSize(java.lang.Integer)} for the accepted range.
         *
         * @return the requested page size, or {@code null} if the server default applies
         */
        public java.lang.Integer getPageSize() {
          return pageSize;
        }

        /**
         * Requested page size. The size must be an integer between `1` and `5000`. If unspecified,
         * the default is '5000'. Returns error code `INVALID_ARGUMENT` if an invalid value is
         * specified.
         *
         * @param pageSize the page size, between 1 and 5000
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setPageSize(java.lang.Integer pageSize) {
          this.pageSize = pageSize;
          return this;
        }

        /**
         * A token that lets the client fetch the next page of results. Typically, this is the value
         * of next_page_token returned from the previous call to
         * `BulkListLineItemAssignedTargetingOptions` method. If not specified, the first page of
         * results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;

        /**
         * A token that lets the client fetch the next page of results. See
         * {@link #setPageToken(java.lang.String)}.
         *
         * @return the current page token, or {@code null} for the first page
         */
        public java.lang.String getPageToken() {
          return pageToken;
        }

        /**
         * A token that lets the client fetch the next page of results. Typically, this is the value
         * of next_page_token returned from the previous call to
         * `BulkListLineItemAssignedTargetingOptions` method. If not specified, the first page of
         * results will be returned.
         *
         * @param pageToken the page token from a previous response
         * @return this request, for call chaining
         */
        public BulkListLineItemAssignedTargetingOptions setPageToken(java.lang.String pageToken) {
          this.pageToken = pageToken;
          return this;
        }

        /** Sets an arbitrary query parameter {@code parameterName} to {@code value}. */
        @Override
        public BulkListLineItemAssignedTargetingOptions set(String parameterName, Object value) {
          return (BulkListLineItemAssignedTargetingOptions) super.set(parameterName, value);
        }
      }
      /**
       * Creates a new line item. Returns the newly created line item if successful.
       *
       * Create a request for the method "lineItems.create".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any optional
       * parameters, call the {@link Create#execute()} method to invoke the remote operation.
       *
       * @param advertiserId Output only. The unique ID of the advertiser the line item belongs to.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.LineItem}
       * @return the initialized request, ready for optional parameters and {@code execute()}
       * @throws java.io.IOException if initializing the request fails
       */
      public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.LineItem content) throws java.io.IOException {
        Create result = new Create(advertiserId, content);
        initialize(result);
        return result;
      }
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LineItem> {

        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems";

        // Path-parameter validation pattern for REST_PATH (any non-empty segment without '/').
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");

        /**
         * Creates a new line item. Returns the newly created line item if successful.
         *
         * Create a request for the method "lineItems.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Output only. The unique ID of the advertiser the line item belongs to.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.LineItem}
         * @since 1.13
         */
        protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.LineItem content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.LineItem.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        }

        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }

        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }

        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }

        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }

        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }

        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }

        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }

        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }

        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }

        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }

        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }

        /** Output only. The unique ID of the advertiser the line item belongs to. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;

        /**
         * Output only. The unique ID of the advertiser the line item belongs to.
         *
         * @return the advertiser ID
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }

        /**
         * Output only. The unique ID of the advertiser the line item belongs to.
         *
         * @param advertiserId the advertiser ID
         * @return this request, for call chaining
         */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }

        /** Sets an arbitrary query parameter {@code parameterName} to {@code value}. */
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Deletes a line item. Returns error code `NOT_FOUND` if the line item does not exist. The line
* item should be archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`, to be able to
* delete it.
*
* Create a request for the method "lineItems.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId The ID of the advertiser this line item belongs to.
* @param lineItemId The ID of the line item we need to fetch.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long lineItemId) throws java.io.IOException {
Delete result = new Delete(advertiserId, lineItemId);
initialize(result);
return result;
}
    public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}";
      // Emitted by the code generator but not referenced in this class: both path
      // parameters are numeric, so no pattern validation is performed on them.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Deletes a line item. Returns error code `NOT_FOUND` if the line item does not exist. The line
       * item should be archived first, i.e. set entity_status to `ENTITY_STATUS_ARCHIVED`, to be able
       * to delete it.
       *
       * Create a request for the method "lineItems.delete".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
       * <p> {@link
       * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId The ID of the advertiser this line item belongs to.
       * @param lineItemId The ID of the line item to delete.
       * @since 1.13
       */
      protected Delete(java.lang.Long advertiserId, java.lang.Long lineItemId) {
        super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
      }
      @Override
      public Delete set$Xgafv(java.lang.String $Xgafv) {
        return (Delete) super.set$Xgafv($Xgafv);
      }
      @Override
      public Delete setAccessToken(java.lang.String accessToken) {
        return (Delete) super.setAccessToken(accessToken);
      }
      @Override
      public Delete setAlt(java.lang.String alt) {
        return (Delete) super.setAlt(alt);
      }
      @Override
      public Delete setCallback(java.lang.String callback) {
        return (Delete) super.setCallback(callback);
      }
      @Override
      public Delete setFields(java.lang.String fields) {
        return (Delete) super.setFields(fields);
      }
      @Override
      public Delete setKey(java.lang.String key) {
        return (Delete) super.setKey(key);
      }
      @Override
      public Delete setOauthToken(java.lang.String oauthToken) {
        return (Delete) super.setOauthToken(oauthToken);
      }
      @Override
      public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Delete) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Delete setQuotaUser(java.lang.String quotaUser) {
        return (Delete) super.setQuotaUser(quotaUser);
      }
      @Override
      public Delete setUploadType(java.lang.String uploadType) {
        return (Delete) super.setUploadType(uploadType);
      }
      @Override
      public Delete setUploadProtocol(java.lang.String uploadProtocol) {
        return (Delete) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the advertiser this line item belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser this line item belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser this line item belongs to. */
      public Delete setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** The ID of the line item to delete. */
      @com.google.api.client.util.Key
      private java.lang.Long lineItemId;
      /** The ID of the line item to delete.
       */
      public java.lang.Long getLineItemId() {
        return lineItemId;
      }
      /** The ID of the line item to delete. */
      public Delete setLineItemId(java.lang.Long lineItemId) {
        this.lineItemId = lineItemId;
        return this;
      }
      @Override
      public Delete set(String parameterName, Object value) {
        return (Delete) super.set(parameterName, value);
      }
    }
/**
* Gets a line item.
*
* Create a request for the method "lineItems.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser this line item belongs to.
* @param lineItemId Required. The ID of the line item to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long lineItemId) throws java.io.IOException {
Get result = new Get(advertiserId, lineItemId);
initialize(result);
return result;
}
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LineItem> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}";
      // Emitted by the code generator but not referenced in this class: both path
      // parameters are numeric, so no pattern validation is performed on them.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a line item.
       *
       * Create a request for the method "lineItems.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser this line item belongs to.
       * @param lineItemId Required. The ID of the line item to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long advertiserId, java.lang.Long lineItemId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.LineItem.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser this line item belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser this line item belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser this line item belongs to. */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the line item to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long lineItemId;
      /** Required. The ID of the line item to fetch.
       */
      public java.lang.Long getLineItemId() {
        return lineItemId;
      }
      /** Required. The ID of the line item to fetch. */
      public Get setLineItemId(java.lang.Long lineItemId) {
        this.lineItemId = lineItemId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
/**
* Lists line items in an advertiser. The order is defined by the order_by parameter. If a filter by
* entity_status is not specified, line items with `ENTITY_STATUS_ARCHIVED` will not be included in
* the results.
*
* Create a request for the method "lineItems.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser to list line items for.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListLineItemsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems";
      // Emitted by the code generator but not referenced in this class: advertiserId
      // is a numeric path parameter, so no pattern validation is performed on it.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists line items in an advertiser. The order is defined by the order_by parameter. If a filter
       * by entity_status is not specified, line items with `ENTITY_STATUS_ARCHIVED` will not be
       * included in the results.
       *
       * Create a request for the method "lineItems.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the advertiser to list line items for.
       * @since 1.13
       */
      protected List(java.lang.Long advertiserId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListLineItemsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the advertiser to list line items for. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the advertiser to list line items for.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the advertiser to list line items for. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by line item properties. Supported syntax: * Filter expressions are made
       * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
       * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
       * of `{field} {operator} {value}`. * The operator used on `flight.dateRange.endDate` must
       * be LESS THAN (<). * The operator used on `updateTime` must be `GREATER THAN OR EQUAL TO
       * (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The operator used on `warningMessages` must be
       * `HAS (:)`. * The operators used on all other fields must be `EQUALS (=)`. * Supported
       * fields: - `campaignId` - `displayName` - `insertionOrderId` - `entityStatus` -
       * `lineItemId` - `lineItemType` - `flight.dateRange.endDate` (input formatted as YYYY-MM-
       * DD) - `warningMessages` - `flight.triggerId` - `updateTime` (input in ISO 8601 format, or
       * YYYY-MM-DDTHH:MM:SSZ) * The operator can be `NO LESS THAN (>=)` or `NO GREATER THAN
       * (<=)`. - `updateTime` (format of ISO 8601) Examples: * All line items under an insertion
       * order: `insertionOrderId="1234"` * All `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`
       * and `LINE_ITEM_TYPE_DISPLAY_DEFAULT` line items under an advertiser:
       * `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND
       * lineItemType="LINE_ITEM_TYPE_DISPLAY_DEFAULT"` * All line items whose flight dates end
       * before March 28, 2019: `flight.dateRange.endDate<"2019-03-28"` * All line items that have
       * `NO_VALID_CREATIVE` in `warningMessages`: `warningMessages:"NO_VALID_CREATIVE"` * All
       * line items with an update time less than or equal to `2020-11-04T18:54:47Z (format of ISO
       * 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All line items with an update time greater
       * than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
       * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
       * characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /** Allows filtering by line item properties. Supported syntax: * Filter expressions are made up of one
     or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence
     of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
     {value}`. * The operator used on `flight.dateRange.endDate` must be LESS THAN (<). * The operator
     used on `updateTime` must be `GREATER THAN OR EQUAL TO (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The
     operator used on `warningMessages` must be `HAS (:)`. * The operators used on all other fields must
     be `EQUALS (=)`. * Supported fields: - `campaignId` - `displayName` - `insertionOrderId` -
     `entityStatus` - `lineItemId` - `lineItemType` - `flight.dateRange.endDate` (input formatted as
     YYYY-MM-DD) - `warningMessages` - `flight.triggerId` - `updateTime` (input in ISO 8601 format, or
     YYYY-MM-DDTHH:MM:SSZ) * The operator can be `NO LESS THAN (>=)` or `NO GREATER THAN (<=)`. -
     `updateTime` (format of ISO 8601) Examples: * All line items under an insertion order:
     `insertionOrderId="1234"` * All `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED` and
     `LINE_ITEM_TYPE_DISPLAY_DEFAULT` line items under an advertiser:
     `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND
     lineItemType="LINE_ITEM_TYPE_DISPLAY_DEFAULT"` * All line items whose flight dates end before March
     28, 2019: `flight.dateRange.endDate<"2019-03-28"` * All line items that have `NO_VALID_CREATIVE` in
     `warningMessages`: `warningMessages:"NO_VALID_CREATIVE"` * All line items with an update time less
     than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`: `updateTime<="2020-11-04T18:54:47Z"`
     * All line items with an update time greater than or equal to `2020-11-04T18:54:47Z (format of ISO
     8601)`: `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
     characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by line item properties. Supported syntax: * Filter expressions are made
       * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
       * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
       * of `{field} {operator} {value}`. * The operator used on `flight.dateRange.endDate` must
       * be LESS THAN (<). * The operator used on `updateTime` must be `GREATER THAN OR EQUAL TO
       * (>=)` or `LESS THAN OR EQUAL TO (<=)`. * The operator used on `warningMessages` must be
       * `HAS (:)`. * The operators used on all other fields must be `EQUALS (=)`. * Supported
       * fields: - `campaignId` - `displayName` - `insertionOrderId` - `entityStatus` -
       * `lineItemId` - `lineItemType` - `flight.dateRange.endDate` (input formatted as YYYY-MM-
       * DD) - `warningMessages` - `flight.triggerId` - `updateTime` (input in ISO 8601 format, or
       * YYYY-MM-DDTHH:MM:SSZ) * The operator can be `NO LESS THAN (>=)` or `NO GREATER THAN
       * (<=)`. - `updateTime` (format of ISO 8601) Examples: * All line items under an insertion
       * order: `insertionOrderId="1234"` * All `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`
       * and `LINE_ITEM_TYPE_DISPLAY_DEFAULT` line items under an advertiser:
       * `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND
       * lineItemType="LINE_ITEM_TYPE_DISPLAY_DEFAULT"` * All line items whose flight dates end
       * before March 28, 2019: `flight.dateRange.endDate<"2019-03-28"` * All line items that have
       * `NO_VALID_CREATIVE` in `warningMessages`: `warningMessages:"NO_VALID_CREATIVE"` * All
       * line items with an update time less than or equal to `2020-11-04T18:54:47Z (format of ISO
       * 8601)`: `updateTime<="2020-11-04T18:54:47Z"` * All line items with an update time greater
       * than or equal to `2020-11-04T18:54:47Z (format of ISO 8601)`:
       * `updateTime>="2020-11-04T18:54:47Z"` The length of this field should be no more than 500
       * characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * "displayName" (default) *
       * "entityStatus" * "flight.dateRange.endDate" * "updateTime" The default sorting order is
       * ascending. To specify descending order for a field, a suffix "desc" should be added to
       * the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /** Field by which to sort the list. Acceptable values are: * "displayName" (default) * "entityStatus"
     * "flight.dateRange.endDate" * "updateTime" The default sorting order is ascending. To specify
     descending order for a field, a suffix "desc" should be added to the field name. Example:
     `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * "displayName" (default) *
       * "entityStatus" * "flight.dateRange.endDate" * "updateTime" The default sorting order is
       * ascending. To specify descending order for a field, a suffix "desc" should be added to
       * the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
     error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListLineItems` method. If
       * not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListLineItems` method. If not specified, the
     first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListLineItems` method. If
       * not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
/**
* Updates an existing line item. Returns the updated line item if successful.
*
* Create a request for the method "lineItems.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Output only. The unique ID of the advertiser the line item belongs to.
* @param lineItemId Output only. The unique ID of the line item. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.LineItem}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long lineItemId, com.google.api.services.displayvideo.v1.model.LineItem content) throws java.io.IOException {
Patch result = new Patch(advertiserId, lineItemId, content);
initialize(result);
return result;
}
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LineItem> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}";
      // Emitted by the code generator but not referenced in this class: both path
      // parameters are numeric, so no pattern validation is performed on them.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates an existing line item. Returns the updated line item if successful.
       *
       * Create a request for the method "lineItems.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Output only. The unique ID of the advertiser the line item belongs to.
       * @param lineItemId Output only. The unique ID of the line item. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.LineItem}
       * @since 1.13
       */
      protected Patch(java.lang.Long advertiserId, java.lang.Long lineItemId, com.google.api.services.displayvideo.v1.model.LineItem content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.LineItem.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
      }
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      /** Output only. The unique ID of the advertiser the line item belongs to. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Output only. The unique ID of the advertiser the line item belongs to.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Output only. The unique ID of the advertiser the line item belongs to. */
      public Patch setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Output only. The unique ID of the line item. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long lineItemId;
      /** Output only. The unique ID of the line item. Assigned by the system.
       */
      public java.lang.Long getLineItemId() {
        return lineItemId;
      }
      /** Output only. The unique ID of the line item. Assigned by the system. */
      public Patch setLineItemId(java.lang.Long lineItemId) {
        this.lineItemId = lineItemId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update.
       */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      @Override
      public Patch set(String parameterName, Object value) {
        return (Patch) super.set(parameterName, value);
      }
    }
/**
* An accessor for creating requests from the TargetingTypes collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.TargetingTypes.List request = displayvideo.targetingTypes().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public TargetingTypes targetingTypes() {
return new TargetingTypes();
}
/**
* The "targetingTypes" collection of methods.
*/
public class TargetingTypes {
/**
* An accessor for creating requests from the AssignedTargetingOptions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.AssignedTargetingOptions.List request = displayvideo.assignedTargetingOptions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public AssignedTargetingOptions assignedTargetingOptions() {
return new AssignedTargetingOptions();
}
/**
* The "assignedTargetingOptions" collection of methods.
*/
public class AssignedTargetingOptions {
/**
* Assigns a targeting option to a line item. Returns the assigned targeting option if successful.
*
* Create a request for the method "assignedTargetingOptions.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser the line item belongs to.
* @param lineItemId Required. The ID of the line item the assigned targeting option will belong to.
* @param targetingType Required. Identifies the type of this assigned targeting option.
* @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
* @return the request
*/
public Create create(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) throws java.io.IOException {
Create result = new Create(advertiserId, lineItemId, targetingType, content);
initialize(result);
return result;
}
          public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {
            private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
            // Path-parameter validation patterns. Only TARGETING_TYPE_PATTERN is applied below;
            // advertiserId and lineItemId are Long-typed, so their patterns are declared but never used.
            private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            /**
             * Assigns a targeting option to a line item. Returns the assigned targeting option if successful.
             *
             * Create a request for the method "assignedTargetingOptions.create".
             *
             * This request holds the parameters needed by the displayvideo server. After setting any
             * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
             * <p> {@link
             * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
             * be called to initialize this instance immediately after invoking the constructor. </p>
             *
             * @param advertiserId Required. The ID of the advertiser the line item belongs to.
             * @param lineItemId Required. The ID of the line item the assigned targeting option will belong to.
             * @param targetingType Required. Identifies the type of this assigned targeting option.
             * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
             * @since 1.13
             */
            protected Create(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) {
              super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
              this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
              this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
              this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
            }
            // Covariant overrides of the inherited standard-parameter setters so that call
            // chains keep the concrete Create type.
            @Override
            public Create set$Xgafv(java.lang.String $Xgafv) {
              return (Create) super.set$Xgafv($Xgafv);
            }
            @Override
            public Create setAccessToken(java.lang.String accessToken) {
              return (Create) super.setAccessToken(accessToken);
            }
            @Override
            public Create setAlt(java.lang.String alt) {
              return (Create) super.setAlt(alt);
            }
            @Override
            public Create setCallback(java.lang.String callback) {
              return (Create) super.setCallback(callback);
            }
            @Override
            public Create setFields(java.lang.String fields) {
              return (Create) super.setFields(fields);
            }
            @Override
            public Create setKey(java.lang.String key) {
              return (Create) super.setKey(key);
            }
            @Override
            public Create setOauthToken(java.lang.String oauthToken) {
              return (Create) super.setOauthToken(oauthToken);
            }
            @Override
            public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
              return (Create) super.setPrettyPrint(prettyPrint);
            }
            @Override
            public Create setQuotaUser(java.lang.String quotaUser) {
              return (Create) super.setQuotaUser(quotaUser);
            }
            @Override
            public Create setUploadType(java.lang.String uploadType) {
              return (Create) super.setUploadType(uploadType);
            }
            @Override
            public Create setUploadProtocol(java.lang.String uploadProtocol) {
              return (Create) super.setUploadProtocol(uploadProtocol);
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long advertiserId;
            /** Required. The ID of the advertiser the line item belongs to.
             */
            public java.lang.Long getAdvertiserId() {
              return advertiserId;
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            public Create setAdvertiserId(java.lang.Long advertiserId) {
              this.advertiserId = advertiserId;
              return this;
            }
            /** Required. The ID of the line item the assigned targeting option will belong to. */
            @com.google.api.client.util.Key
            private java.lang.Long lineItemId;
            /** Required. The ID of the line item the assigned targeting option will belong to.
             */
            public java.lang.Long getLineItemId() {
              return lineItemId;
            }
            /** Required. The ID of the line item the assigned targeting option will belong to. */
            public Create setLineItemId(java.lang.Long lineItemId) {
              this.lineItemId = lineItemId;
              return this;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            @com.google.api.client.util.Key
            private java.lang.String targetingType;
            /** Required. Identifies the type of this assigned targeting option.
             */
            public java.lang.String getTargetingType() {
              return targetingType;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            public Create setTargetingType(java.lang.String targetingType) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.targetingType = targetingType;
              return this;
            }
            @Override
            public Create set(String parameterName, Object value) {
              return (Create) super.set(parameterName, value);
            }
          }
/**
* Deletes an assigned targeting option from a line item.
*
* Create a request for the method "assignedTargetingOptions.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser the line item belongs to.
* @param lineItemId Required. The ID of the line item the assigned targeting option belongs to.
* @param targetingType Required. Identifies the type of this assigned targeting option.
* @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
Delete result = new Delete(advertiserId, lineItemId, targetingType, assignedTargetingOptionId);
initialize(result);
return result;
}
          public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
            private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
            // Path-parameter validation patterns. Only the targetingType and
            // assignedTargetingOptionId patterns are applied below; advertiserId and
            // lineItemId are Long-typed, so their patterns are declared but never used.
            private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            /**
             * Deletes an assigned targeting option from a line item.
             *
             * Create a request for the method "assignedTargetingOptions.delete".
             *
             * This request holds the parameters needed by the displayvideo server. After setting any
             * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
             * <p> {@link
             * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
             * be called to initialize this instance immediately after invoking the constructor. </p>
             *
             * @param advertiserId Required. The ID of the advertiser the line item belongs to.
             * @param lineItemId Required. The ID of the line item the assigned targeting option belongs to.
             * @param targetingType Required. Identifies the type of this assigned targeting option.
             * @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
             * @since 1.13
             */
            protected Delete(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
              super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
              this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
              this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
              this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                    "Parameter assignedTargetingOptionId must conform to the pattern " +
                    "^[^/]+$");
              }
            }
            // Covariant overrides of the inherited standard-parameter setters so that call
            // chains keep the concrete Delete type.
            @Override
            public Delete set$Xgafv(java.lang.String $Xgafv) {
              return (Delete) super.set$Xgafv($Xgafv);
            }
            @Override
            public Delete setAccessToken(java.lang.String accessToken) {
              return (Delete) super.setAccessToken(accessToken);
            }
            @Override
            public Delete setAlt(java.lang.String alt) {
              return (Delete) super.setAlt(alt);
            }
            @Override
            public Delete setCallback(java.lang.String callback) {
              return (Delete) super.setCallback(callback);
            }
            @Override
            public Delete setFields(java.lang.String fields) {
              return (Delete) super.setFields(fields);
            }
            @Override
            public Delete setKey(java.lang.String key) {
              return (Delete) super.setKey(key);
            }
            @Override
            public Delete setOauthToken(java.lang.String oauthToken) {
              return (Delete) super.setOauthToken(oauthToken);
            }
            @Override
            public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
              return (Delete) super.setPrettyPrint(prettyPrint);
            }
            @Override
            public Delete setQuotaUser(java.lang.String quotaUser) {
              return (Delete) super.setQuotaUser(quotaUser);
            }
            @Override
            public Delete setUploadType(java.lang.String uploadType) {
              return (Delete) super.setUploadType(uploadType);
            }
            @Override
            public Delete setUploadProtocol(java.lang.String uploadProtocol) {
              return (Delete) super.setUploadProtocol(uploadProtocol);
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long advertiserId;
            /** Required. The ID of the advertiser the line item belongs to.
             */
            public java.lang.Long getAdvertiserId() {
              return advertiserId;
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            public Delete setAdvertiserId(java.lang.Long advertiserId) {
              this.advertiserId = advertiserId;
              return this;
            }
            /** Required. The ID of the line item the assigned targeting option belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long lineItemId;
            /** Required. The ID of the line item the assigned targeting option belongs to.
             */
            public java.lang.Long getLineItemId() {
              return lineItemId;
            }
            /** Required. The ID of the line item the assigned targeting option belongs to. */
            public Delete setLineItemId(java.lang.Long lineItemId) {
              this.lineItemId = lineItemId;
              return this;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            @com.google.api.client.util.Key
            private java.lang.String targetingType;
            /** Required. Identifies the type of this assigned targeting option.
             */
            public java.lang.String getTargetingType() {
              return targetingType;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            public Delete setTargetingType(java.lang.String targetingType) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.targetingType = targetingType;
              return this;
            }
            /** Required. The ID of the assigned targeting option to delete. */
            @com.google.api.client.util.Key
            private java.lang.String assignedTargetingOptionId;
            /** Required. The ID of the assigned targeting option to delete.
             */
            public java.lang.String getAssignedTargetingOptionId() {
              return assignedTargetingOptionId;
            }
            /** Required. The ID of the assigned targeting option to delete. */
            public Delete setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                    "Parameter assignedTargetingOptionId must conform to the pattern " +
                    "^[^/]+$");
              }
              this.assignedTargetingOptionId = assignedTargetingOptionId;
              return this;
            }
            @Override
            public Delete set(String parameterName, Object value) {
              return (Delete) super.set(parameterName, value);
            }
          }
/**
* Gets a single targeting option assigned to a line item.
*
* Create a request for the method "assignedTargetingOptions.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser the line item belongs to.
* @param lineItemId Required. The ID of the line item the assigned targeting option belongs to.
* @param targetingType Required. Identifies the type of this assigned targeting option.
* @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this line item that identifies the assigned
* targeting option being requested.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
Get result = new Get(advertiserId, lineItemId, targetingType, assignedTargetingOptionId);
initialize(result);
return result;
}
          public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {
            private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
            // Path-parameter validation patterns. Only the targetingType and
            // assignedTargetingOptionId patterns are applied below; advertiserId and
            // lineItemId are Long-typed, so their patterns are declared but never used.
            private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            /**
             * Gets a single targeting option assigned to a line item.
             *
             * Create a request for the method "assignedTargetingOptions.get".
             *
             * This request holds the parameters needed by the displayvideo server. After setting any
             * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
             * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
             * must be called to initialize this instance immediately after invoking the constructor. </p>
             *
             * @param advertiserId Required. The ID of the advertiser the line item belongs to.
             * @param lineItemId Required. The ID of the line item the assigned targeting option belongs to.
             * @param targetingType Required. Identifies the type of this assigned targeting option.
             * @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this line item that identifies the assigned
             *        targeting option being requested.
             * @since 1.13
             */
            protected Get(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
              super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
              this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
              this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
              this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                    "Parameter assignedTargetingOptionId must conform to the pattern " +
                    "^[^/]+$");
              }
            }
            // HTTP GET requests may also be issued as HEAD; expose the inherited support.
            @Override
            public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
              return super.executeUsingHead();
            }
            @Override
            public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
              return super.buildHttpRequestUsingHead();
            }
            // Covariant overrides of the inherited standard-parameter setters so that call
            // chains keep the concrete Get type.
            @Override
            public Get set$Xgafv(java.lang.String $Xgafv) {
              return (Get) super.set$Xgafv($Xgafv);
            }
            @Override
            public Get setAccessToken(java.lang.String accessToken) {
              return (Get) super.setAccessToken(accessToken);
            }
            @Override
            public Get setAlt(java.lang.String alt) {
              return (Get) super.setAlt(alt);
            }
            @Override
            public Get setCallback(java.lang.String callback) {
              return (Get) super.setCallback(callback);
            }
            @Override
            public Get setFields(java.lang.String fields) {
              return (Get) super.setFields(fields);
            }
            @Override
            public Get setKey(java.lang.String key) {
              return (Get) super.setKey(key);
            }
            @Override
            public Get setOauthToken(java.lang.String oauthToken) {
              return (Get) super.setOauthToken(oauthToken);
            }
            @Override
            public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
              return (Get) super.setPrettyPrint(prettyPrint);
            }
            @Override
            public Get setQuotaUser(java.lang.String quotaUser) {
              return (Get) super.setQuotaUser(quotaUser);
            }
            @Override
            public Get setUploadType(java.lang.String uploadType) {
              return (Get) super.setUploadType(uploadType);
            }
            @Override
            public Get setUploadProtocol(java.lang.String uploadProtocol) {
              return (Get) super.setUploadProtocol(uploadProtocol);
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long advertiserId;
            /** Required. The ID of the advertiser the line item belongs to.
             */
            public java.lang.Long getAdvertiserId() {
              return advertiserId;
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            public Get setAdvertiserId(java.lang.Long advertiserId) {
              this.advertiserId = advertiserId;
              return this;
            }
            /** Required. The ID of the line item the assigned targeting option belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long lineItemId;
            /** Required. The ID of the line item the assigned targeting option belongs to.
             */
            public java.lang.Long getLineItemId() {
              return lineItemId;
            }
            /** Required. The ID of the line item the assigned targeting option belongs to. */
            public Get setLineItemId(java.lang.Long lineItemId) {
              this.lineItemId = lineItemId;
              return this;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            @com.google.api.client.util.Key
            private java.lang.String targetingType;
            /** Required. Identifies the type of this assigned targeting option.
             */
            public java.lang.String getTargetingType() {
              return targetingType;
            }
            /** Required. Identifies the type of this assigned targeting option. */
            public Get setTargetingType(java.lang.String targetingType) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.targetingType = targetingType;
              return this;
            }
            /**
             * Required. An identifier unique to the targeting type in this line item that
             * identifies the assigned targeting option being requested.
             */
            @com.google.api.client.util.Key
            private java.lang.String assignedTargetingOptionId;
            /** Required. An identifier unique to the targeting type in this line item that identifies the assigned
             targeting option being requested.
             */
            public java.lang.String getAssignedTargetingOptionId() {
              return assignedTargetingOptionId;
            }
            /**
             * Required. An identifier unique to the targeting type in this line item that
             * identifies the assigned targeting option being requested.
             */
            public Get setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                    "Parameter assignedTargetingOptionId must conform to the pattern " +
                    "^[^/]+$");
              }
              this.assignedTargetingOptionId = assignedTargetingOptionId;
              return this;
            }
            @Override
            public Get set(String parameterName, Object value) {
              return (Get) super.set(parameterName, value);
            }
          }
/**
* Lists the targeting options assigned to a line item.
*
* Create a request for the method "assignedTargetingOptions.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser the line item belongs to.
* @param lineItemId Required. The ID of the line item to list assigned targeting options for.
* @param targetingType Required. Identifies the type of assigned targeting options to list.
* @return the request
*/
public List list(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType) throws java.io.IOException {
List result = new List(advertiserId, lineItemId, targetingType);
initialize(result);
return result;
}
          public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListLineItemAssignedTargetingOptionsResponse> {
            private static final String REST_PATH = "v1/advertisers/{+advertiserId}/lineItems/{+lineItemId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
            // Path-parameter validation patterns. Only TARGETING_TYPE_PATTERN is applied below;
            // advertiserId and lineItemId are Long-typed, so their patterns are declared but never used.
            private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern LINE_ITEM_ID_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
                java.util.regex.Pattern.compile("^[^/]+$");
            /**
             * Lists the targeting options assigned to a line item.
             *
             * Create a request for the method "assignedTargetingOptions.list".
             *
             * This request holds the parameters needed by the displayvideo server. After setting any
             * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
             * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
             * must be called to initialize this instance immediately after invoking the constructor. </p>
             *
             * @param advertiserId Required. The ID of the advertiser the line item belongs to.
             * @param lineItemId Required. The ID of the line item to list assigned targeting options for.
             * @param targetingType Required. Identifies the type of assigned targeting options to list.
             * @since 1.13
             */
            protected List(java.lang.Long advertiserId, java.lang.Long lineItemId, java.lang.String targetingType) {
              super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListLineItemAssignedTargetingOptionsResponse.class);
              this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
              this.lineItemId = com.google.api.client.util.Preconditions.checkNotNull(lineItemId, "Required parameter lineItemId must be specified.");
              this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
            }
            // HTTP GET requests may also be issued as HEAD; expose the inherited support.
            @Override
            public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
              return super.executeUsingHead();
            }
            @Override
            public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
              return super.buildHttpRequestUsingHead();
            }
            // Covariant overrides of the inherited standard-parameter setters so that call
            // chains keep the concrete List type.
            @Override
            public List set$Xgafv(java.lang.String $Xgafv) {
              return (List) super.set$Xgafv($Xgafv);
            }
            @Override
            public List setAccessToken(java.lang.String accessToken) {
              return (List) super.setAccessToken(accessToken);
            }
            @Override
            public List setAlt(java.lang.String alt) {
              return (List) super.setAlt(alt);
            }
            @Override
            public List setCallback(java.lang.String callback) {
              return (List) super.setCallback(callback);
            }
            @Override
            public List setFields(java.lang.String fields) {
              return (List) super.setFields(fields);
            }
            @Override
            public List setKey(java.lang.String key) {
              return (List) super.setKey(key);
            }
            @Override
            public List setOauthToken(java.lang.String oauthToken) {
              return (List) super.setOauthToken(oauthToken);
            }
            @Override
            public List setPrettyPrint(java.lang.Boolean prettyPrint) {
              return (List) super.setPrettyPrint(prettyPrint);
            }
            @Override
            public List setQuotaUser(java.lang.String quotaUser) {
              return (List) super.setQuotaUser(quotaUser);
            }
            @Override
            public List setUploadType(java.lang.String uploadType) {
              return (List) super.setUploadType(uploadType);
            }
            @Override
            public List setUploadProtocol(java.lang.String uploadProtocol) {
              return (List) super.setUploadProtocol(uploadProtocol);
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            @com.google.api.client.util.Key
            private java.lang.Long advertiserId;
            /** Required. The ID of the advertiser the line item belongs to.
             */
            public java.lang.Long getAdvertiserId() {
              return advertiserId;
            }
            /** Required. The ID of the advertiser the line item belongs to. */
            public List setAdvertiserId(java.lang.Long advertiserId) {
              this.advertiserId = advertiserId;
              return this;
            }
            /** Required. The ID of the line item to list assigned targeting options for. */
            @com.google.api.client.util.Key
            private java.lang.Long lineItemId;
            /** Required. The ID of the line item to list assigned targeting options for.
             */
            public java.lang.Long getLineItemId() {
              return lineItemId;
            }
            /** Required. The ID of the line item to list assigned targeting options for. */
            public List setLineItemId(java.lang.Long lineItemId) {
              this.lineItemId = lineItemId;
              return this;
            }
            /** Required. Identifies the type of assigned targeting options to list. */
            @com.google.api.client.util.Key
            private java.lang.String targetingType;
            /** Required. Identifies the type of assigned targeting options to list.
             */
            public java.lang.String getTargetingType() {
              return targetingType;
            }
            /** Required. Identifies the type of assigned targeting options to list. */
            public List setTargetingType(java.lang.String targetingType) {
              if (!getSuppressPatternChecks()) {
                com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                    "Parameter targetingType must conform to the pattern " +
                    "^[^/]+$");
              }
              this.targetingType = targetingType;
              return this;
            }
            /**
             * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
             * expressions are made up of one or more restrictions. * Restrictions can be combined
             * by the logical operator `OR`. * A restriction has the form of `{field} {operator}
             * {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
             * `assignedTargetingOptionId` - `inheritance` Examples: * AssignedTargetingOptions with
             * ID 1 or 2 `assignedTargetingOptionId="1" OR assignedTargetingOptionId="2"` *
             * AssignedTargetingOptions with inheritance status of NOT_INHERITED or
             * INHERITED_FROM_PARTNER `inheritance="NOT_INHERITED" OR
             * inheritance="INHERITED_FROM_PARTNER"` The length of this field should be no more than
             * 500 characters.
             */
            @com.google.api.client.util.Key
            private java.lang.String filter;
            /** Allows filtering by assigned targeting option properties. Supported syntax: * Filter expressions
             are made up of one or more restrictions. * Restrictions can be combined by the logical operator
             `OR`. * A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS
             (=)`. * Supported fields: - `assignedTargetingOptionId` - `inheritance` Examples: *
             AssignedTargetingOptions with ID 1 or 2 `assignedTargetingOptionId="1" OR
             assignedTargetingOptionId="2"` * AssignedTargetingOptions with inheritance status of NOT_INHERITED
             or INHERITED_FROM_PARTNER `inheritance="NOT_INHERITED" OR inheritance="INHERITED_FROM_PARTNER"` The
             length of this field should be no more than 500 characters.
             */
            public java.lang.String getFilter() {
              return filter;
            }
            /**
             * Allows filtering by assigned targeting option properties. Supported syntax: * Filter
             * expressions are made up of one or more restrictions. * Restrictions can be combined
             * by the logical operator `OR`. * A restriction has the form of `{field} {operator}
             * {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
             * `assignedTargetingOptionId` - `inheritance` Examples: * AssignedTargetingOptions with
             * ID 1 or 2 `assignedTargetingOptionId="1" OR assignedTargetingOptionId="2"` *
             * AssignedTargetingOptions with inheritance status of NOT_INHERITED or
             * INHERITED_FROM_PARTNER `inheritance="NOT_INHERITED" OR
             * inheritance="INHERITED_FROM_PARTNER"` The length of this field should be no more than
             * 500 characters.
             */
            public List setFilter(java.lang.String filter) {
              this.filter = filter;
              return this;
            }
            /**
             * Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
             * (default) The default sorting order is ascending. To specify descending order for a
             * field, a suffix "desc" should be added to the field name. Example:
             * `assignedTargetingOptionId desc`.
             */
            @com.google.api.client.util.Key
            private java.lang.String orderBy;
            /** Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId` (default) The
             default sorting order is ascending. To specify descending order for a field, a suffix "desc" should
             be added to the field name. Example: `assignedTargetingOptionId desc`.
             */
            public java.lang.String getOrderBy() {
              return orderBy;
            }
            /**
             * Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
             * (default) The default sorting order is ascending. To specify descending order for a
             * field, a suffix "desc" should be added to the field name. Example:
             * `assignedTargetingOptionId desc`.
             */
            public List setOrderBy(java.lang.String orderBy) {
              this.orderBy = orderBy;
              return this;
            }
            /**
             * Requested page size. Must be between `1` and `100`. If unspecified will default to
             * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
             */
            @com.google.api.client.util.Key
            private java.lang.Integer pageSize;
            /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
             error code `INVALID_ARGUMENT` if an invalid value is specified.
             */
            public java.lang.Integer getPageSize() {
              return pageSize;
            }
            /**
             * Requested page size. Must be between `1` and `100`. If unspecified will default to
             * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
             */
            public List setPageSize(java.lang.Integer pageSize) {
              this.pageSize = pageSize;
              return this;
            }
            /**
             * A token identifying a page of results the server should return. Typically, this is
             * the value of next_page_token returned from the previous call to
             * `ListLineItemAssignedTargetingOptions` method. If not specified, the first page of
             * results will be returned.
             */
            @com.google.api.client.util.Key
            private java.lang.String pageToken;
            /** A token identifying a page of results the server should return. Typically, this is the value of
             next_page_token returned from the previous call to `ListLineItemAssignedTargetingOptions` method.
             If not specified, the first page of results will be returned.
             */
            public java.lang.String getPageToken() {
              return pageToken;
            }
            /**
             * A token identifying a page of results the server should return. Typically, this is
             * the value of next_page_token returned from the previous call to
             * `ListLineItemAssignedTargetingOptions` method. If not specified, the first page of
             * results will be returned.
             */
            public List setPageToken(java.lang.String pageToken) {
              this.pageToken = pageToken;
              return this;
            }
            @Override
            public List set(String parameterName, Object value) {
              return (List) super.set(parameterName, value);
            }
          }
}
}
}
/**
* An accessor for creating requests from the LocationLists collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.LocationLists.List request = displayvideo.locationLists().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public LocationLists locationLists() {
return new LocationLists();
}
/**
* The "locationLists" collection of methods.
*/
public class LocationLists {
/**
* Creates a new location list. Returns the newly created location list if successful.
*
* Create a request for the method "locationLists.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
* @param content the {@link com.google.api.services.displayvideo.v1.model.LocationList}
* @return the request
*/
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.LocationList content) throws java.io.IOException {
Create result = new Create(advertiserId, content);
initialize(result);
return result;
}
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LocationList> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/locationLists";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Creates a new location list. Returns the newly created location list if successful.
         *
         * Create a request for the method "locationLists.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.LocationList}
         * @since 1.13
         */
        protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.LocationList content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.LocationList.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        }
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }
        /** Required. The ID of the DV360 advertiser to which the location list belongs. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the location list belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the DV360 advertiser to which the location list belongs. */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Gets a location list.
*
* Create a request for the method "locationLists.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the fetched location list belongs.
* @param locationListId Required. The ID of the location list to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long locationListId) throws java.io.IOException {
Get result = new Get(advertiserId, locationListId);
initialize(result);
return result;
}
      public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LocationList> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/locationLists/{+locationListId}";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        private final java.util.regex.Pattern LOCATION_LIST_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Gets a location list.
         *
         * Create a request for the method "locationLists.get".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
         * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the fetched location list belongs.
         * @param locationListId Required. The ID of the location list to fetch.
         * @since 1.13
         */
        protected Get(java.lang.Long advertiserId, java.lang.Long locationListId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.LocationList.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
        }
        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }
        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }
        @Override
        public Get set$Xgafv(java.lang.String $Xgafv) {
          return (Get) super.set$Xgafv($Xgafv);
        }
        @Override
        public Get setAccessToken(java.lang.String accessToken) {
          return (Get) super.setAccessToken(accessToken);
        }
        @Override
        public Get setAlt(java.lang.String alt) {
          return (Get) super.setAlt(alt);
        }
        @Override
        public Get setCallback(java.lang.String callback) {
          return (Get) super.setCallback(callback);
        }
        @Override
        public Get setFields(java.lang.String fields) {
          return (Get) super.setFields(fields);
        }
        @Override
        public Get setKey(java.lang.String key) {
          return (Get) super.setKey(key);
        }
        @Override
        public Get setOauthToken(java.lang.String oauthToken) {
          return (Get) super.setOauthToken(oauthToken);
        }
        @Override
        public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Get) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Get setQuotaUser(java.lang.String quotaUser) {
          return (Get) super.setQuotaUser(quotaUser);
        }
        @Override
        public Get setUploadType(java.lang.String uploadType) {
          return (Get) super.setUploadType(uploadType);
        }
        @Override
        public Get setUploadProtocol(java.lang.String uploadProtocol) {
          return (Get) super.setUploadProtocol(uploadProtocol);
        }
        /** Required. The ID of the DV360 advertiser to which the fetched location list belongs. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the fetched location list belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the DV360 advertiser to which the fetched location list belongs. */
        public Get setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /** Required. The ID of the location list to fetch. */
        @com.google.api.client.util.Key
        private java.lang.Long locationListId;
        /** Required. The ID of the location list to fetch.
         */
        public java.lang.Long getLocationListId() {
          return locationListId;
        }
        /** Required. The ID of the location list to fetch. */
        public Get setLocationListId(java.lang.Long locationListId) {
          this.locationListId = locationListId;
          return this;
        }
        @Override
        public Get set(String parameterName, Object value) {
          return (Get) super.set(parameterName, value);
        }
      }
/**
* Lists location lists based on a given advertiser id.
*
* Create a request for the method "locationLists.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the fetched location lists belong.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
      public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListLocationListsResponse> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/locationLists";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Lists location lists based on a given advertiser id.
         *
         * Create a request for the method "locationLists.list".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
         * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the fetched location lists belong.
         * @since 1.13
         */
        protected List(java.lang.Long advertiserId) {
          super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListLocationListsResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        }
        @Override
        public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
          return super.executeUsingHead();
        }
        @Override
        public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
          return super.buildHttpRequestUsingHead();
        }
        @Override
        public List set$Xgafv(java.lang.String $Xgafv) {
          return (List) super.set$Xgafv($Xgafv);
        }
        @Override
        public List setAccessToken(java.lang.String accessToken) {
          return (List) super.setAccessToken(accessToken);
        }
        @Override
        public List setAlt(java.lang.String alt) {
          return (List) super.setAlt(alt);
        }
        @Override
        public List setCallback(java.lang.String callback) {
          return (List) super.setCallback(callback);
        }
        @Override
        public List setFields(java.lang.String fields) {
          return (List) super.setFields(fields);
        }
        @Override
        public List setKey(java.lang.String key) {
          return (List) super.setKey(key);
        }
        @Override
        public List setOauthToken(java.lang.String oauthToken) {
          return (List) super.setOauthToken(oauthToken);
        }
        @Override
        public List setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (List) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public List setQuotaUser(java.lang.String quotaUser) {
          return (List) super.setQuotaUser(quotaUser);
        }
        @Override
        public List setUploadType(java.lang.String uploadType) {
          return (List) super.setUploadType(uploadType);
        }
        @Override
        public List setUploadProtocol(java.lang.String uploadProtocol) {
          return (List) super.setUploadProtocol(uploadProtocol);
        }
        /** Required. The ID of the DV360 advertiser to which the fetched location lists belong. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the fetched location lists belong.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the DV360 advertiser to which the fetched location lists belong. */
        public List setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /**
         * Allows filtering by location list fields. Supported syntax: * Filter expressions are made
         * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
         * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
         * of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields:
         * - `locationType` Examples: * All regional location list:
         * `locationType="TARGETING_LOCATION_TYPE_REGIONAL"` * All proximity location list:
         * `locationType="TARGETING_LOCATION_TYPE_PROXIMITY"`
         */
        @com.google.api.client.util.Key
        private java.lang.String filter;
        /** Allows filtering by location list fields. Supported syntax: * Filter expressions are made up of one
         or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence
         of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
         {value}`. * The operator must be `EQUALS (=)`. * Supported fields: - `locationType` Examples: * All
         regional location list: `locationType="TARGETING_LOCATION_TYPE_REGIONAL"` * All proximity location
         list: `locationType="TARGETING_LOCATION_TYPE_PROXIMITY"`
         */
        public java.lang.String getFilter() {
          return filter;
        }
        /**
         * Allows filtering by location list fields. Supported syntax: * Filter expressions are made
         * up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
         * operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
         * of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields:
         * - `locationType` Examples: * All regional location list:
         * `locationType="TARGETING_LOCATION_TYPE_REGIONAL"` * All proximity location list:
         * `locationType="TARGETING_LOCATION_TYPE_PROXIMITY"`
         */
        public List setFilter(java.lang.String filter) {
          this.filter = filter;
          return this;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * `locationListId` (default) *
         * `displayName` The default sorting order is ascending. To specify descending order for a
         * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
         */
        @com.google.api.client.util.Key
        private java.lang.String orderBy;
        /** Field by which to sort the list. Acceptable values are: * `locationListId` (default) *
         `displayName` The default sorting order is ascending. To specify descending order for a field, a
         suffix "desc" should be added to the field name. Example: `displayName desc`.
         */
        public java.lang.String getOrderBy() {
          return orderBy;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * `locationListId` (default) *
         * `displayName` The default sorting order is ascending. To specify descending order for a
         * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
         */
        public List setOrderBy(java.lang.String orderBy) {
          this.orderBy = orderBy;
          return this;
        }
        /**
         * Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns
         * error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        @com.google.api.client.util.Key
        private java.lang.Integer pageSize;
        /** Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns error
         code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        public java.lang.Integer getPageSize() {
          return pageSize;
        }
        /**
         * Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns
         * error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        public List setPageSize(java.lang.Integer pageSize) {
          this.pageSize = pageSize;
          return this;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListLocationLists` method.
         * If not specified, the first page of results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;
        /** A token identifying a page of results the server should return. Typically, this is the value of
         next_page_token returned from the previous call to `ListLocationLists` method. If not specified,
         the first page of results will be returned.
         */
        public java.lang.String getPageToken() {
          return pageToken;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListLocationLists` method.
         * If not specified, the first page of results will be returned.
         */
        public List setPageToken(java.lang.String pageToken) {
          this.pageToken = pageToken;
          return this;
        }
        @Override
        public List set(String parameterName, Object value) {
          return (List) super.set(parameterName, value);
        }
      }
/**
* Updates a location list. Returns the updated location list if successful.
*
* Create a request for the method "locationLists.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the location lists belongs.
* @param locationListId Output only. The unique ID of the location list. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.LocationList}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.LocationList content) throws java.io.IOException {
Patch result = new Patch(advertiserId, locationListId, content);
initialize(result);
return result;
}
      public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.LocationList> {
        private static final String REST_PATH = "v1/advertisers/{+advertiserId}/locationLists/{locationListId}";
        private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Updates a location list. Returns the updated location list if successful.
         *
         * Create a request for the method "locationLists.patch".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
         * <p> {@link
         * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the location lists belongs.
         * @param locationListId Output only. The unique ID of the location list. Assigned by the system.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.LocationList}
         * @since 1.13
         */
        protected Patch(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.LocationList content) {
          super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.LocationList.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
        }
        @Override
        public Patch set$Xgafv(java.lang.String $Xgafv) {
          return (Patch) super.set$Xgafv($Xgafv);
        }
        @Override
        public Patch setAccessToken(java.lang.String accessToken) {
          return (Patch) super.setAccessToken(accessToken);
        }
        @Override
        public Patch setAlt(java.lang.String alt) {
          return (Patch) super.setAlt(alt);
        }
        @Override
        public Patch setCallback(java.lang.String callback) {
          return (Patch) super.setCallback(callback);
        }
        @Override
        public Patch setFields(java.lang.String fields) {
          return (Patch) super.setFields(fields);
        }
        @Override
        public Patch setKey(java.lang.String key) {
          return (Patch) super.setKey(key);
        }
        @Override
        public Patch setOauthToken(java.lang.String oauthToken) {
          return (Patch) super.setOauthToken(oauthToken);
        }
        @Override
        public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Patch) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Patch setQuotaUser(java.lang.String quotaUser) {
          return (Patch) super.setQuotaUser(quotaUser);
        }
        @Override
        public Patch setUploadType(java.lang.String uploadType) {
          return (Patch) super.setUploadType(uploadType);
        }
        @Override
        public Patch setUploadProtocol(java.lang.String uploadProtocol) {
          return (Patch) super.setUploadProtocol(uploadProtocol);
        }
        /** Required. The ID of the DV360 advertiser to which the location lists belongs. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the location lists belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** Required. The ID of the DV360 advertiser to which the location lists belongs. */
        public Patch setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /** Output only. The unique ID of the location list. Assigned by the system. */
        @com.google.api.client.util.Key
        private java.lang.Long locationListId;
        /** Output only. The unique ID of the location list. Assigned by the system.
         */
        public java.lang.Long getLocationListId() {
          return locationListId;
        }
        /** Output only. The unique ID of the location list. Assigned by the system. */
        public Patch setLocationListId(java.lang.Long locationListId) {
          this.locationListId = locationListId;
          return this;
        }
        /** Required. The mask to control which fields to update. */
        @com.google.api.client.util.Key
        private String updateMask;
        /** Required. The mask to control which fields to update.
         */
        public String getUpdateMask() {
          return updateMask;
        }
        /** Required. The mask to control which fields to update. */
        public Patch setUpdateMask(String updateMask) {
          this.updateMask = updateMask;
          return this;
        }
        @Override
        public Patch set(String parameterName, Object value) {
          return (Patch) super.set(parameterName, value);
        }
      }
/**
* An accessor for creating requests from the AssignedLocations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.AssignedLocations.List request = displayvideo.assignedLocations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public AssignedLocations assignedLocations() {
return new AssignedLocations();
}
/**
* The "assignedLocations" collection of methods.
*/
public class AssignedLocations {
/**
* Bulk edits multiple assignments between locations and a single location list. The operation will
* delete the assigned locations provided in
* BulkEditAssignedLocationsRequest.deleted_assigned_locations and then create the assigned
* locations provided in BulkEditAssignedLocationsRequest.created_assigned_locations.
*
* Create a request for the method "assignedLocations.bulkEdit".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
* @param locationListId Required. The ID of the location list to which these assignments are assigned.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsRequest}
* @return the request
*/
public BulkEdit bulkEdit(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsRequest content) throws java.io.IOException {
BulkEdit result = new BulkEdit(advertiserId, locationListId, content);
initialize(result);
return result;
}
        public class BulkEdit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsResponse> {
          private static final String REST_PATH = "v1/advertisers/{advertiserId}/locationLists/{+locationListId}/assignedLocations:bulkEdit";
          private final java.util.regex.Pattern LOCATION_LIST_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");
          /**
           * Bulk edits multiple assignments between locations and a single location list. The operation
           * will delete the assigned locations provided in
           * BulkEditAssignedLocationsRequest.deleted_assigned_locations and then create the assigned
           * locations provided in BulkEditAssignedLocationsRequest.created_assigned_locations.
           *
           * Create a request for the method "assignedLocations.bulkEdit".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
           * <p> {@link
           * BulkEdit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
           * must be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
           * @param locationListId Required. The ID of the location list to which these assignments are assigned.
           * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsRequest}
           * @since 1.13
           */
          protected BulkEdit(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsRequest content) {
            super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditAssignedLocationsResponse.class);
            this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
            this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
          }
          @Override
          public BulkEdit set$Xgafv(java.lang.String $Xgafv) {
            return (BulkEdit) super.set$Xgafv($Xgafv);
          }
          @Override
          public BulkEdit setAccessToken(java.lang.String accessToken) {
            return (BulkEdit) super.setAccessToken(accessToken);
          }
          @Override
          public BulkEdit setAlt(java.lang.String alt) {
            return (BulkEdit) super.setAlt(alt);
          }
          @Override
          public BulkEdit setCallback(java.lang.String callback) {
            return (BulkEdit) super.setCallback(callback);
          }
          @Override
          public BulkEdit setFields(java.lang.String fields) {
            return (BulkEdit) super.setFields(fields);
          }
          @Override
          public BulkEdit setKey(java.lang.String key) {
            return (BulkEdit) super.setKey(key);
          }
          @Override
          public BulkEdit setOauthToken(java.lang.String oauthToken) {
            return (BulkEdit) super.setOauthToken(oauthToken);
          }
          @Override
          public BulkEdit setPrettyPrint(java.lang.Boolean prettyPrint) {
            return (BulkEdit) super.setPrettyPrint(prettyPrint);
          }
          @Override
          public BulkEdit setQuotaUser(java.lang.String quotaUser) {
            return (BulkEdit) super.setQuotaUser(quotaUser);
          }
          @Override
          public BulkEdit setUploadType(java.lang.String uploadType) {
            return (BulkEdit) super.setUploadType(uploadType);
          }
          @Override
          public BulkEdit setUploadProtocol(java.lang.String uploadProtocol) {
            return (BulkEdit) super.setUploadProtocol(uploadProtocol);
          }
          /** Required. The ID of the DV360 advertiser to which the location list belongs. */
          @com.google.api.client.util.Key
          private java.lang.Long advertiserId;
          /** Required. The ID of the DV360 advertiser to which the location list belongs.
           */
          public java.lang.Long getAdvertiserId() {
            return advertiserId;
          }
          /** Required. The ID of the DV360 advertiser to which the location list belongs. */
          public BulkEdit setAdvertiserId(java.lang.Long advertiserId) {
            this.advertiserId = advertiserId;
            return this;
          }
          /** Required. The ID of the location list to which these assignments are assigned. */
          @com.google.api.client.util.Key
          private java.lang.Long locationListId;
          /** Required. The ID of the location list to which these assignments are assigned.
           */
          public java.lang.Long getLocationListId() {
            return locationListId;
          }
          /** Required. The ID of the location list to which these assignments are assigned. */
          public BulkEdit setLocationListId(java.lang.Long locationListId) {
            this.locationListId = locationListId;
            return this;
          }
          @Override
          public BulkEdit set(String parameterName, Object value) {
            return (BulkEdit) super.set(parameterName, value);
          }
        }
/**
* Creates an assignment between a location and a location list.
*
* Create a request for the method "assignedLocations.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
* @param locationListId Required. The ID of the location list for which the assignment will be created.
* @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedLocation}
* @return the request
*/
public Create create(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.AssignedLocation content) throws java.io.IOException {
Create result = new Create(advertiserId, locationListId, content);
initialize(result);
return result;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedLocation> {
private static final String REST_PATH = "v1/advertisers/{advertiserId}/locationLists/{locationListId}/assignedLocations";
/**
 * Creates an assignment between a location and a location list.
 *
 * Create a request for the method "assignedLocations.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
 * <p> {@link
 * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
 * @param locationListId Required. The ID of the location list for which the assignment will be created.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedLocation}
 * @since 1.13
 */
protected Create(java.lang.Long advertiserId, java.lang.Long locationListId, com.google.api.services.displayvideo.v1.model.AssignedLocation content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.AssignedLocation.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the location list belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the location list for which the assignment will be created. */
@com.google.api.client.util.Key
private java.lang.Long locationListId;
/** Required. The ID of the location list for which the assignment will be created.
 */
public java.lang.Long getLocationListId() {
return locationListId;
}
/** Required. The ID of the location list for which the assignment will be created. */
public Create setLocationListId(java.lang.Long locationListId) {
this.locationListId = locationListId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
 * Deletes the assignment between a location and a location list.
 *
 * Builds a request for the method "assignedLocations.delete".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Delete#execute()} method to invoke the remote operation.
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
 * @param locationListId Required. The ID of the location list to which this assignment is assigned.
 * @param assignedLocationId Required. The ID of the assigned location to delete.
 * @return the initialized request, ready to execute
 */
public Delete delete(java.lang.Long advertiserId, java.lang.Long locationListId, java.lang.Long assignedLocationId) throws java.io.IOException {
  Delete request = new Delete(advertiserId, locationListId, assignedLocationId);
  initialize(request);
  return request;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
private static final String REST_PATH = "v1/advertisers/{advertiserId}/locationLists/{locationListId}/assignedLocations/{+assignedLocationId}";
// NOTE(review): this pattern mirrors the {+assignedLocationId} path template but is never
// referenced in this class (the ID is a Long) — presumably retained by the code generator;
// confirm before removing.
private final java.util.regex.Pattern ASSIGNED_LOCATION_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Deletes the assignment between a location and a location list.
 *
 * Create a request for the method "assignedLocations.delete".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
 * <p> {@link
 * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
 * @param locationListId Required. The ID of the location list to which this assignment is assigned.
 * @param assignedLocationId Required. The ID of the assigned location to delete.
 * @since 1.13
 */
protected Delete(java.lang.Long advertiserId, java.lang.Long locationListId, java.lang.Long assignedLocationId) {
super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
this.assignedLocationId = com.google.api.client.util.Preconditions.checkNotNull(assignedLocationId, "Required parameter assignedLocationId must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the location list belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
public Delete setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the location list to which this assignment is assigned. */
@com.google.api.client.util.Key
private java.lang.Long locationListId;
/** Required. The ID of the location list to which this assignment is assigned.
 */
public java.lang.Long getLocationListId() {
return locationListId;
}
/** Required. The ID of the location list to which this assignment is assigned. */
public Delete setLocationListId(java.lang.Long locationListId) {
this.locationListId = locationListId;
return this;
}
/** Required. The ID of the assigned location to delete. */
@com.google.api.client.util.Key
private java.lang.Long assignedLocationId;
/** Required. The ID of the assigned location to delete.
 */
public java.lang.Long getAssignedLocationId() {
return assignedLocationId;
}
/** Required. The ID of the assigned location to delete. */
public Delete setAssignedLocationId(java.lang.Long assignedLocationId) {
this.assignedLocationId = assignedLocationId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
 * Lists locations assigned to a location list.
 *
 * Builds a request for the method "assignedLocations.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link List#execute()} method to invoke the remote operation.
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
 * @param locationListId Required. The ID of the location list to which these assignments are assigned.
 * @return the initialized request, ready to execute
 */
public List list(java.lang.Long advertiserId, java.lang.Long locationListId) throws java.io.IOException {
  List request = new List(advertiserId, locationListId);
  initialize(request);
  return request;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListAssignedLocationsResponse> {
private static final String REST_PATH = "v1/advertisers/{advertiserId}/locationLists/{locationListId}/assignedLocations";
/**
 * Lists locations assigned to a location list.
 *
 * Create a request for the method "assignedLocations.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
 * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the location list belongs.
 * @param locationListId Required. The ID of the location list to which these assignments are assigned.
 * @since 1.13
 */
protected List(java.lang.Long advertiserId, java.lang.Long locationListId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListAssignedLocationsResponse.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.locationListId = com.google.api.client.util.Preconditions.checkNotNull(locationListId, "Required parameter locationListId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the location list belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the DV360 advertiser to which the location list belongs. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the location list to which these assignments are assigned. */
@com.google.api.client.util.Key
private java.lang.Long locationListId;
/** Required. The ID of the location list to which these assignments are assigned.
 */
public java.lang.Long getLocationListId() {
return locationListId;
}
/** Required. The ID of the location list to which these assignments are assigned. */
public List setLocationListId(java.lang.Long locationListId) {
this.locationListId = locationListId;
return this;
}
/**
 * Allows filtering by location list assignment fields. Supported syntax: * Filter
 * expressions are made up of one or more restrictions. * Restrictions can be combined by
 * the logical operator `OR`. * A restriction has the form of `{field} {operator}
 * {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
 * `assignedLocationId` The length of this field should be no more than 500 characters.
 */
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by location list assignment fields. Supported syntax: * Filter expressions are
made up of one or more restrictions. * Restrictions can be combined by the logical operator `OR`. *
A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. *
Supported fields: - `assignedLocationId` The length of this field should be no more than 500
characters.
 */
public java.lang.String getFilter() {
return filter;
}
/**
 * Allows filtering by location list assignment fields. Supported syntax: * Filter
 * expressions are made up of one or more restrictions. * Restrictions can be combined by
 * the logical operator `OR`. * A restriction has the form of `{field} {operator}
 * {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
 * `assignedLocationId` The length of this field should be no more than 500 characters.
 */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
 * Field by which to sort the list. Acceptable values are: * `assignedLocationId`
 * (default) The default sorting order is ascending. To specify descending order for a
 * field, a suffix " desc" should be added to the field name. Example: `assignedLocationId
 * desc`.
 */
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `assignedLocationId` (default) The
default sorting order is ascending. To specify descending order for a field, a suffix " desc"
should be added to the field name. Example: `assignedLocationId desc`.
 */
public java.lang.String getOrderBy() {
return orderBy;
}
/**
 * Field by which to sort the list. Acceptable values are: * `assignedLocationId`
 * (default) The default sorting order is ascending. To specify descending order for a
 * field, a suffix " desc" should be added to the field name. Example: `assignedLocationId
 * desc`.
 */
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to
 * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to
 * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListAssignedLocations`
 * method. If not specified, the first page of results will be returned.
 */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListAssignedLocations` method. If not
specified, the first page of results will be returned.
 */
public java.lang.String getPageToken() {
return pageToken;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListAssignedLocations`
 * method. If not specified, the first page of results will be returned.
 */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
/**
 * An accessor for creating requests from the ManualTriggers collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 * {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 * {@code DisplayVideo.ManualTriggers.List request = displayvideo.manualTriggers().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public ManualTriggers manualTriggers() {
  ManualTriggers collection = new ManualTriggers();
  return collection;
}
/**
* The "manualTriggers" collection of methods.
*/
public class ManualTriggers {
/**
 * Activates a manual trigger. Each activation of the manual trigger must be at least 5 minutes
 * apart, otherwise an error will be returned.
 *
 * Builds a request for the method "manualTriggers.activate".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Activate#execute()} method to invoke the remote operation.
 *
 * @param advertiserId Required. The ID of the advertiser that the manual trigger belongs.
 * @param triggerId Required. The ID of the manual trigger to activate.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.ActivateManualTriggerRequest}
 * @return the initialized request, ready to execute
 */
public Activate activate(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.ActivateManualTriggerRequest content) throws java.io.IOException {
  Activate request = new Activate(advertiserId, triggerId, content);
  initialize(request);
  return request;
}
public class Activate extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ManualTrigger> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers/{+triggerId}:activate";
// NOTE(review): these patterns mirror the {+advertiserId}/{+triggerId} path templates but are
// never referenced in this class (both IDs are Longs) — presumably retained by the code
// generator; confirm before removing.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TRIGGER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Activates a manual trigger. Each activation of the manual trigger must be at least 5 minutes
 * apart, otherwise an error will be returned.
 *
 * Create a request for the method "manualTriggers.activate".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Activate#execute()} method to invoke the remote operation.
 * <p> {@link
 * Activate#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the advertiser that the manual trigger belongs.
 * @param triggerId Required. The ID of the manual trigger to activate.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.ActivateManualTriggerRequest}
 * @since 1.13
 */
protected Activate(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.ActivateManualTriggerRequest content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.ManualTrigger.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.triggerId = com.google.api.client.util.Preconditions.checkNotNull(triggerId, "Required parameter triggerId must be specified.");
}
@Override
public Activate set$Xgafv(java.lang.String $Xgafv) {
return (Activate) super.set$Xgafv($Xgafv);
}
@Override
public Activate setAccessToken(java.lang.String accessToken) {
return (Activate) super.setAccessToken(accessToken);
}
@Override
public Activate setAlt(java.lang.String alt) {
return (Activate) super.setAlt(alt);
}
@Override
public Activate setCallback(java.lang.String callback) {
return (Activate) super.setCallback(callback);
}
@Override
public Activate setFields(java.lang.String fields) {
return (Activate) super.setFields(fields);
}
@Override
public Activate setKey(java.lang.String key) {
return (Activate) super.setKey(key);
}
@Override
public Activate setOauthToken(java.lang.String oauthToken) {
return (Activate) super.setOauthToken(oauthToken);
}
@Override
public Activate setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Activate) super.setPrettyPrint(prettyPrint);
}
@Override
public Activate setQuotaUser(java.lang.String quotaUser) {
return (Activate) super.setQuotaUser(quotaUser);
}
@Override
public Activate setUploadType(java.lang.String uploadType) {
return (Activate) super.setUploadType(uploadType);
}
@Override
public Activate setUploadProtocol(java.lang.String uploadProtocol) {
return (Activate) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser that the manual trigger belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser that the manual trigger belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser that the manual trigger belongs. */
public Activate setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the manual trigger to activate. */
@com.google.api.client.util.Key
private java.lang.Long triggerId;
/** Required. The ID of the manual trigger to activate.
 */
public java.lang.Long getTriggerId() {
return triggerId;
}
/** Required. The ID of the manual trigger to activate. */
public Activate setTriggerId(java.lang.Long triggerId) {
this.triggerId = triggerId;
return this;
}
@Override
public Activate set(String parameterName, Object value) {
return (Activate) super.set(parameterName, value);
}
}
/**
 * Creates a new manual trigger. Returns the newly created manual trigger if successful.
 *
 * Builds a request for the method "manualTriggers.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Create#execute()} method to invoke the remote operation.
 *
 * @param advertiserId Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.ManualTrigger}
 * @return the initialized request, ready to execute
 */
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.ManualTrigger content) throws java.io.IOException {
  Create request = new Create(advertiserId, content);
  initialize(request);
  return request;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ManualTrigger> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers";
// NOTE(review): this pattern mirrors the {+advertiserId} path template but is never referenced
// in this class (the ID is a Long) — presumably retained by the code generator; confirm before
// removing.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Creates a new manual trigger. Returns the newly created manual trigger if successful.
 *
 * Create a request for the method "manualTriggers.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
 * <p> {@link
 * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.ManualTrigger}
 * @since 1.13
 */
protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.ManualTrigger content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.ManualTrigger.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
 * Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
 */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
 * Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
 */
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
 * Deactivates a manual trigger.
 *
 * Builds a request for the method "manualTriggers.deactivate".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Deactivate#execute()} method to invoke the remote operation.
 *
 * @param advertiserId Required. The ID of the advertiser that the manual trigger belongs.
 * @param triggerId Required. The ID of the manual trigger to deactivate.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.DeactivateManualTriggerRequest}
 * @return the initialized request, ready to execute
 */
public Deactivate deactivate(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.DeactivateManualTriggerRequest content) throws java.io.IOException {
  Deactivate request = new Deactivate(advertiserId, triggerId, content);
  initialize(request);
  return request;
}
public class Deactivate extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ManualTrigger> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers/{+triggerId}:deactivate";
// NOTE(review): these patterns mirror the {+advertiserId}/{+triggerId} path templates but are
// never referenced in this class (both IDs are Longs) — presumably retained by the code
// generator; confirm before removing.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TRIGGER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Deactivates a manual trigger.
 *
 * Create a request for the method "manualTriggers.deactivate".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Deactivate#execute()} method to invoke the remote
 * operation. <p> {@link
 * Deactivate#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the advertiser that the manual trigger belongs.
 * @param triggerId Required. The ID of the manual trigger to deactivate.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.DeactivateManualTriggerRequest}
 * @since 1.13
 */
protected Deactivate(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.DeactivateManualTriggerRequest content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.ManualTrigger.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.triggerId = com.google.api.client.util.Preconditions.checkNotNull(triggerId, "Required parameter triggerId must be specified.");
}
@Override
public Deactivate set$Xgafv(java.lang.String $Xgafv) {
return (Deactivate) super.set$Xgafv($Xgafv);
}
@Override
public Deactivate setAccessToken(java.lang.String accessToken) {
return (Deactivate) super.setAccessToken(accessToken);
}
@Override
public Deactivate setAlt(java.lang.String alt) {
return (Deactivate) super.setAlt(alt);
}
@Override
public Deactivate setCallback(java.lang.String callback) {
return (Deactivate) super.setCallback(callback);
}
@Override
public Deactivate setFields(java.lang.String fields) {
return (Deactivate) super.setFields(fields);
}
@Override
public Deactivate setKey(java.lang.String key) {
return (Deactivate) super.setKey(key);
}
@Override
public Deactivate setOauthToken(java.lang.String oauthToken) {
return (Deactivate) super.setOauthToken(oauthToken);
}
@Override
public Deactivate setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Deactivate) super.setPrettyPrint(prettyPrint);
}
@Override
public Deactivate setQuotaUser(java.lang.String quotaUser) {
return (Deactivate) super.setQuotaUser(quotaUser);
}
@Override
public Deactivate setUploadType(java.lang.String uploadType) {
return (Deactivate) super.setUploadType(uploadType);
}
@Override
public Deactivate setUploadProtocol(java.lang.String uploadProtocol) {
return (Deactivate) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser that the manual trigger belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser that the manual trigger belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser that the manual trigger belongs. */
public Deactivate setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the manual trigger to deactivate. */
@com.google.api.client.util.Key
private java.lang.Long triggerId;
/** Required. The ID of the manual trigger to deactivate.
 */
public java.lang.Long getTriggerId() {
return triggerId;
}
/** Required. The ID of the manual trigger to deactivate. */
public Deactivate setTriggerId(java.lang.Long triggerId) {
this.triggerId = triggerId;
return this;
}
@Override
public Deactivate set(String parameterName, Object value) {
return (Deactivate) super.set(parameterName, value);
}
}
/**
* Gets a manual trigger.
*
* Create a request for the method "manualTriggers.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser this manual trigger belongs to.
* @param triggerId Required. The ID of the manual trigger to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long triggerId) throws java.io.IOException {
Get result = new Get(advertiserId, triggerId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ManualTrigger> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers/{+triggerId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TRIGGER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a manual trigger.
*
* Create a request for the method "manualTriggers.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the advertiser this manual trigger belongs to.
* @param triggerId Required. The ID of the manual trigger to fetch.
* @since 1.13
*/
protected Get(java.lang.Long advertiserId, java.lang.Long triggerId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ManualTrigger.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.triggerId = com.google.api.client.util.Preconditions.checkNotNull(triggerId, "Required parameter triggerId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser this manual trigger belongs to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser this manual trigger belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser this manual trigger belongs to. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the manual trigger to fetch. */
@com.google.api.client.util.Key
private java.lang.Long triggerId;
/** Required. The ID of the manual trigger to fetch.
*/
public java.lang.Long getTriggerId() {
return triggerId;
}
/** Required. The ID of the manual trigger to fetch. */
public Get setTriggerId(java.lang.Long triggerId) {
this.triggerId = triggerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists manual triggers that are accessible to the current user for a given advertiser ID. The
* order is defined by the order_by parameter. A single advertiser_id is required.
*
* Create a request for the method "manualTriggers.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the advertiser that the fetched manual triggers belong to.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListManualTriggersResponse> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists manual triggers that are accessible to the current user for a given advertiser ID. The
* order is defined by the order_by parameter. A single advertiser_id is required.
*
* Create a request for the method "manualTriggers.list".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the advertiser that the fetched manual triggers belong to.
* @since 1.13
*/
protected List(java.lang.Long advertiserId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListManualTriggersResponse.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser that the fetched manual triggers belong to. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser that the fetched manual triggers belong to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser that the fetched manual triggers belong to. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Allows filtering by manual trigger properties. Supported syntax: * Filter expressions are
* made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
* logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. *
* Supported fields: - `displayName` - `state` Examples: * All active manual triggers under
* an advertiser: `state="ACTIVE"` The length of this field should be no more than 500
* characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by manual trigger properties. Supported syntax: * Filter expressions are made up
of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A
sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
{value}`. * The operator must be `EQUALS (=)`. * Supported fields: - `displayName` - `state`
Examples: * All active manual triggers under an advertiser: `state="ACTIVE"` The length of this
field should be no more than 500 characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by manual trigger properties. Supported syntax: * Filter expressions are
* made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
* logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. *
* Supported fields: - `displayName` - `state` Examples: * All active manual triggers under
* an advertiser: `state="ACTIVE"` The length of this field should be no more than 500
* characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` (default) *
* `state` The default sorting order is ascending. To specify descending order for a field,
* a suffix "desc" should be added to the field name. For example, `displayName desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `displayName` (default) * `state` The
default sorting order is ascending. To specify descending order for a field, a suffix "desc" should
be added to the field name. For example, `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` (default) *
* `state` The default sorting order is ascending. To specify descending order for a field,
* a suffix "desc" should be added to the field name. For example, `displayName desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListManualTriggers` method.
* If not specified, the first page of results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListManualTriggers` method. If not specified,
the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListManualTriggers` method.
* If not specified, the first page of results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates a manual trigger. Returns the updated manual trigger if successful.
*
* Create a request for the method "manualTriggers.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
* @param triggerId Output only. The unique ID of the manual trigger.
* @param content the {@link com.google.api.services.displayvideo.v1.model.ManualTrigger}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.ManualTrigger content) throws java.io.IOException {
Patch result = new Patch(advertiserId, triggerId, content);
initialize(result);
return result;
}
public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ManualTrigger> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/manualTriggers/{+triggerId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TRIGGER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Updates a manual trigger. Returns the updated manual trigger if successful.
*
* Create a request for the method "manualTriggers.patch".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
* @param triggerId Output only. The unique ID of the manual trigger.
* @param content the {@link com.google.api.services.displayvideo.v1.model.ManualTrigger}
* @since 1.13
*/
protected Patch(java.lang.Long advertiserId, java.lang.Long triggerId, com.google.api.services.displayvideo.v1.model.ManualTrigger content) {
super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.ManualTrigger.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.triggerId = com.google.api.client.util.Preconditions.checkNotNull(triggerId, "Required parameter triggerId must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
*/
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
* Required. Immutable. The unique ID of the advertiser that the manual trigger belongs to.
*/
public Patch setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Output only. The unique ID of the manual trigger. */
@com.google.api.client.util.Key
private java.lang.Long triggerId;
/** Output only. The unique ID of the manual trigger.
*/
public java.lang.Long getTriggerId() {
return triggerId;
}
/** Output only. The unique ID of the manual trigger. */
public Patch setTriggerId(java.lang.Long triggerId) {
this.triggerId = triggerId;
return this;
}
/** Required. The mask to control which fields to update. */
@com.google.api.client.util.Key
private String updateMask;
/** Required. The mask to control which fields to update.
*/
public String getUpdateMask() {
return updateMask;
}
/** Required. The mask to control which fields to update. */
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the NegativeKeywordLists collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.NegativeKeywordLists.List request = displayvideo.negativeKeywordLists().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public NegativeKeywordLists negativeKeywordLists() {
return new NegativeKeywordLists();
}
/**
* The "negativeKeywordLists" collection of methods.
*/
public class NegativeKeywordLists {
/**
* Creates a new negative keyword list. Returns the newly created negative keyword list if
* successful.
*
* Create a request for the method "negativeKeywordLists.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list will belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeywordList}
* @return the request
*/
public Create create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.NegativeKeywordList content) throws java.io.IOException {
Create result = new Create(advertiserId, content);
initialize(result);
return result;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.NegativeKeywordList> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Creates a new negative keyword list. Returns the newly created negative keyword list if
* successful.
*
* Create a request for the method "negativeKeywordLists.create".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list will belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeywordList}
* @since 1.13
*/
protected Create(java.lang.Long advertiserId, com.google.api.services.displayvideo.v1.model.NegativeKeywordList content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.NegativeKeywordList.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The ID of the DV360 advertiser to which the negative keyword list will belong.
*/
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the negative keyword list will belong.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
* Required. The ID of the DV360 advertiser to which the negative keyword list will belong.
*/
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a negative keyword list given an advertiser ID and a negative keyword list ID.
*
* Create a request for the method "negativeKeywordLists.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the negative keyword list to delete.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) throws java.io.IOException {
Delete result = new Delete(advertiserId, negativeKeywordListId);
initialize(result);
return result;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists/{+negativeKeywordListId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Deletes a negative keyword list given an advertiser ID and a negative keyword list ID.
*
* Create a request for the method "negativeKeywordLists.delete".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the negative keyword list to delete.
* @since 1.13
*/
protected Delete(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) {
super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the DV360 advertiser to which the negative keyword list belongs. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the DV360 advertiser to which the negative keyword list belongs. */
public Delete setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** Required. The ID of the negative keyword list to delete. */
@com.google.api.client.util.Key
private java.lang.Long negativeKeywordListId;
/** Required. The ID of the negative keyword list to delete.
*/
public java.lang.Long getNegativeKeywordListId() {
return negativeKeywordListId;
}
/** Required. The ID of the negative keyword list to delete. */
public Delete setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
this.negativeKeywordListId = negativeKeywordListId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets a negative keyword list given an advertiser ID and a negative keyword list ID.
*
* Create a request for the method "negativeKeywordLists.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the fetched negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the negative keyword list to fetch.
* @return the request
*/
public Get get(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) throws java.io.IOException {
Get result = new Get(advertiserId, negativeKeywordListId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.NegativeKeywordList> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists/{+negativeKeywordListId}";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a negative keyword list given an advertiser ID and a negative keyword list ID.
*
* Create a request for the method "negativeKeywordLists.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the fetched negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the negative keyword list to fetch.
* @since 1.13
*/
protected Get(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.NegativeKeywordList.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
}
      // Allows this GET request to be issued as an HTTP HEAD request (headers only).
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // Standard-parameter setter overrides: each delegates to the superclass and
      // narrows the fluent return type to Get so calls can be chained.
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /**
       * Required. The ID of the DV360 advertiser to which the fetched negative keyword list
       * belongs.
       */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the DV360 advertiser to which the fetched negative keyword list belongs.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /**
       * Required. The ID of the DV360 advertiser to which the fetched negative keyword list
       * belongs.
       */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The ID of the negative keyword list to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long negativeKeywordListId;
      /** Required. The ID of the negative keyword list to fetch.
       */
      public java.lang.Long getNegativeKeywordListId() {
        return negativeKeywordListId;
      }
      /** Required. The ID of the negative keyword list to fetch. */
      public Get setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
        this.negativeKeywordListId = negativeKeywordListId;
        return this;
      }
      // Generic catch-all: sets an arbitrary parameter by name via the superclass.
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
}
/**
* Lists negative keyword lists based on a given advertiser id.
*
* Create a request for the method "negativeKeywordLists.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the fetched negative keyword lists belong.
* @return the request
*/
public List list(java.lang.Long advertiserId) throws java.io.IOException {
List result = new List(advertiserId);
initialize(result);
return result;
}
    /**
     * Request object for the "negativeKeywordLists.list" method. Obtain instances from
     * {@link #list(java.lang.Long)}, which applies {@code initialize} to the new request.
     */
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListNegativeKeywordListsResponse> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists";
      // NOTE(review): emitted by the code generator for the {+advertiserId} path segment but never
      // referenced — the numeric advertiserId is not pattern-checked in this class. Confirm intended.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists negative keyword lists based on a given advertiser id.
       *
       * Create a request for the method "negativeKeywordLists.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the DV360 advertiser to which the fetched negative keyword lists belong.
       * @since 1.13
       */
      protected List(java.lang.Long advertiserId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListNegativeKeywordListsResponse.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
      }
      // Allows this GET request to be issued as an HTTP HEAD request (headers only).
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // Standard-parameter setter overrides narrowing the fluent return type to List.
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      /**
       * Required. The ID of the DV360 advertiser to which the fetched negative keyword lists
       * belong.
       */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the DV360 advertiser to which the fetched negative keyword lists belong.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /**
       * Required. The ID of the DV360 advertiser to which the fetched negative keyword lists
       * belong.
       */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns
       * error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns error
     code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. Defaults to `100` if not set. Returns
       * error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListNegativeKeywordLists`
       * method. If not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListNegativeKeywordLists` method. If not
     specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListNegativeKeywordLists`
       * method. If not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      // Generic catch-all: sets an arbitrary parameter by name via the superclass.
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
/**
* Updates a negative keyword list. Returns the updated negative keyword list if successful.
*
* Create a request for the method "negativeKeywordLists.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
* @param negativeKeywordListId Output only. The unique ID of the negative keyword list. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeywordList}
* @return the request
*/
public Patch patch(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.NegativeKeywordList content) throws java.io.IOException {
Patch result = new Patch(advertiserId, negativeKeywordListId, content);
initialize(result);
return result;
}
    /**
     * Request object for the "negativeKeywordLists.patch" method. Obtain instances from
     * {@link #patch}, which applies {@code initialize} to the new request.
     */
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.NegativeKeywordList> {
      private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists/{negativeKeywordListId}";
      // NOTE(review): emitted by the code generator for the {+advertiserId} path segment but never
      // referenced — the numeric advertiserId is not pattern-checked in this class. Confirm intended.
      private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates a negative keyword list. Returns the updated negative keyword list if successful.
       *
       * Create a request for the method "negativeKeywordLists.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param advertiserId Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
       * @param negativeKeywordListId Output only. The unique ID of the negative keyword list. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeywordList}
       * @since 1.13
       */
      protected Patch(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.NegativeKeywordList content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.NegativeKeywordList.class);
        this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
        this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
      }
      // Standard-parameter setter overrides narrowing the fluent return type to Patch.
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the DV360 advertiser to which the negative keyword list belongs. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** Required. The ID of the DV360 advertiser to which the negative keyword list belongs.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** Required. The ID of the DV360 advertiser to which the negative keyword list belongs. */
      public Patch setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Output only. The unique ID of the negative keyword list. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long negativeKeywordListId;
      /** Output only. The unique ID of the negative keyword list. Assigned by the system.
       */
      public java.lang.Long getNegativeKeywordListId() {
        return negativeKeywordListId;
      }
      /** Output only. The unique ID of the negative keyword list. Assigned by the system. */
      public Patch setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
        this.negativeKeywordListId = negativeKeywordListId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update.
       */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      // Generic catch-all: sets an arbitrary parameter by name via the superclass.
      @Override
      public Patch set(String parameterName, Object value) {
        return (Patch) super.set(parameterName, value);
      }
    }
/**
* An accessor for creating requests from the NegativeKeywords collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.NegativeKeywords.List request = displayvideo.negativeKeywords().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public NegativeKeywords negativeKeywords() {
return new NegativeKeywords();
}
/**
* The "negativeKeywords" collection of methods.
*/
public class NegativeKeywords {
/**
* Bulk edits negative keywords in a single negative keyword list. The operation will delete the
* negative keywords provided in BulkEditNegativeKeywordsRequest.deleted_negative_keywords and then
* create the negative keywords provided in
* BulkEditNegativeKeywordsRequest.created_negative_keywords. This operation is guaranteed to be
* atomic and will never result in a partial success or partial failure.
*
* Create a request for the method "negativeKeywords.bulkEdit".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the negative keywords belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsRequest}
* @return the request
*/
public BulkEdit bulkEdit(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsRequest content) throws java.io.IOException {
BulkEdit result = new BulkEdit(advertiserId, negativeKeywordListId, content);
initialize(result);
return result;
}
      /**
       * Request object for the "negativeKeywords.bulkEdit" method. Obtain instances from
       * {@link #bulkEdit}, which applies {@code initialize} to the new request.
       */
      public class BulkEdit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsResponse> {
        private static final String REST_PATH = "v1/advertisers/{advertiserId}/negativeKeywordLists/{+negativeKeywordListId}/negativeKeywords:bulkEdit";
        // NOTE(review): emitted by the code generator for the {+negativeKeywordListId} path segment
        // but never referenced — the numeric negativeKeywordListId is not pattern-checked here.
        private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Bulk edits negative keywords in a single negative keyword list. The operation will delete the
         * negative keywords provided in BulkEditNegativeKeywordsRequest.deleted_negative_keywords and
         * then create the negative keywords provided in
         * BulkEditNegativeKeywordsRequest.created_negative_keywords. This operation is guaranteed to be
         * atomic and will never result in a partial success or partial failure.
         *
         * Create a request for the method "negativeKeywords.bulkEdit".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
         * <p> {@link
         * BulkEdit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         * @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the negative keywords belong.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsRequest}
         * @since 1.13
         */
        protected BulkEdit(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsRequest content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditNegativeKeywordsResponse.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
        }
        // Standard-parameter setter overrides narrowing the fluent return type to BulkEdit.
        @Override
        public BulkEdit set$Xgafv(java.lang.String $Xgafv) {
          return (BulkEdit) super.set$Xgafv($Xgafv);
        }
        @Override
        public BulkEdit setAccessToken(java.lang.String accessToken) {
          return (BulkEdit) super.setAccessToken(accessToken);
        }
        @Override
        public BulkEdit setAlt(java.lang.String alt) {
          return (BulkEdit) super.setAlt(alt);
        }
        @Override
        public BulkEdit setCallback(java.lang.String callback) {
          return (BulkEdit) super.setCallback(callback);
        }
        @Override
        public BulkEdit setFields(java.lang.String fields) {
          return (BulkEdit) super.setFields(fields);
        }
        @Override
        public BulkEdit setKey(java.lang.String key) {
          return (BulkEdit) super.setKey(key);
        }
        @Override
        public BulkEdit setOauthToken(java.lang.String oauthToken) {
          return (BulkEdit) super.setOauthToken(oauthToken);
        }
        @Override
        public BulkEdit setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (BulkEdit) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public BulkEdit setQuotaUser(java.lang.String quotaUser) {
          return (BulkEdit) super.setQuotaUser(quotaUser);
        }
        @Override
        public BulkEdit setUploadType(java.lang.String uploadType) {
          return (BulkEdit) super.setUploadType(uploadType);
        }
        @Override
        public BulkEdit setUploadProtocol(java.lang.String uploadProtocol) {
          return (BulkEdit) super.setUploadProtocol(uploadProtocol);
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        public BulkEdit setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /**
         * Required. The ID of the parent negative keyword list to which the negative keywords
         * belong.
         */
        @com.google.api.client.util.Key
        private java.lang.Long negativeKeywordListId;
        /** Required. The ID of the parent negative keyword list to which the negative keywords belong.
         */
        public java.lang.Long getNegativeKeywordListId() {
          return negativeKeywordListId;
        }
        /**
         * Required. The ID of the parent negative keyword list to which the negative keywords
         * belong.
         */
        public BulkEdit setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
          this.negativeKeywordListId = negativeKeywordListId;
          return this;
        }
        // Generic catch-all: sets an arbitrary parameter by name via the superclass.
        @Override
        public BulkEdit set(String parameterName, Object value) {
          return (BulkEdit) super.set(parameterName, value);
        }
      }
/**
* Creates a negative keyword in a negative keyword list.
*
* Create a request for the method "negativeKeywords.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the parent negative keyword list in which the negative keyword will be created.
* @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeyword}
* @return the request
*/
public Create create(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.NegativeKeyword content) throws java.io.IOException {
Create result = new Create(advertiserId, negativeKeywordListId, content);
initialize(result);
return result;
}
      /**
       * Request object for the "negativeKeywords.create" method. Obtain instances from
       * {@link #create}, which applies {@code initialize} to the new request.
       */
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.NegativeKeyword> {
        private static final String REST_PATH = "v1/advertisers/{advertiserId}/negativeKeywordLists/{+negativeKeywordListId}/negativeKeywords";
        // NOTE(review): emitted by the code generator for the {+negativeKeywordListId} path segment
        // but never referenced — the numeric negativeKeywordListId is not pattern-checked here.
        private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Creates a negative keyword in a negative keyword list.
         *
         * Create a request for the method "negativeKeywords.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         * @param negativeKeywordListId Required. The ID of the parent negative keyword list in which the negative keyword will be created.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.NegativeKeyword}
         * @since 1.13
         */
        protected Create(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, com.google.api.services.displayvideo.v1.model.NegativeKeyword content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.NegativeKeyword.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
        }
        // Standard-parameter setter overrides narrowing the fluent return type to Create.
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /**
         * Required. The ID of the parent negative keyword list in which the negative keyword will
         * be created.
         */
        @com.google.api.client.util.Key
        private java.lang.Long negativeKeywordListId;
        /** Required. The ID of the parent negative keyword list in which the negative keyword will be created.
         */
        public java.lang.Long getNegativeKeywordListId() {
          return negativeKeywordListId;
        }
        /**
         * Required. The ID of the parent negative keyword list in which the negative keyword will
         * be created.
         */
        public Create setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
          this.negativeKeywordListId = negativeKeywordListId;
          return this;
        }
        // Generic catch-all: sets an arbitrary parameter by name via the superclass.
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Deletes a negative keyword from a negative keyword list.
*
* Create a request for the method "negativeKeywords.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
* @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the negative keyword belongs.
* @param keywordValue Required. The keyword value of the negative keyword to delete.
* @return the request
*/
public Delete delete(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, java.lang.String keywordValue) throws java.io.IOException {
Delete result = new Delete(advertiserId, negativeKeywordListId, keywordValue);
initialize(result);
return result;
}
      /**
       * Request object for the "negativeKeywords.delete" method. Obtain instances from
       * {@link #delete}, which applies {@code initialize} to the new request.
       */
      public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
        private static final String REST_PATH = "v1/advertisers/{advertiserId}/negativeKeywordLists/{+negativeKeywordListId}/negativeKeywords/{+keywordValue}";
        // NOTE(review): emitted by the code generator for the {+negativeKeywordListId} path segment
        // but never referenced — the numeric negativeKeywordListId is not pattern-checked here.
        private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        // Enforced on keywordValue in both the constructor and setKeywordValue (unless suppressed).
        private final java.util.regex.Pattern KEYWORD_VALUE_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Deletes a negative keyword from a negative keyword list.
         *
         * Create a request for the method "negativeKeywords.delete".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
         * <p> {@link
         * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         * @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the negative keyword belongs.
         * @param keywordValue Required. The keyword value of the negative keyword to delete.
         * @since 1.13
         */
        protected Delete(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId, java.lang.String keywordValue) {
          super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
          this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
          this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
          this.keywordValue = com.google.api.client.util.Preconditions.checkNotNull(keywordValue, "Required parameter keywordValue must be specified.");
          // The keyword is embedded in the URL path, so it must not contain '/'.
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(KEYWORD_VALUE_PATTERN.matcher(keywordValue).matches(),
                "Parameter keywordValue must conform to the pattern " +
                "^[^/]+$");
          }
        }
        // Standard-parameter setter overrides narrowing the fluent return type to Delete.
        @Override
        public Delete set$Xgafv(java.lang.String $Xgafv) {
          return (Delete) super.set$Xgafv($Xgafv);
        }
        @Override
        public Delete setAccessToken(java.lang.String accessToken) {
          return (Delete) super.setAccessToken(accessToken);
        }
        @Override
        public Delete setAlt(java.lang.String alt) {
          return (Delete) super.setAlt(alt);
        }
        @Override
        public Delete setCallback(java.lang.String callback) {
          return (Delete) super.setCallback(callback);
        }
        @Override
        public Delete setFields(java.lang.String fields) {
          return (Delete) super.setFields(fields);
        }
        @Override
        public Delete setKey(java.lang.String key) {
          return (Delete) super.setKey(key);
        }
        @Override
        public Delete setOauthToken(java.lang.String oauthToken) {
          return (Delete) super.setOauthToken(oauthToken);
        }
        @Override
        public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Delete) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Delete setQuotaUser(java.lang.String quotaUser) {
          return (Delete) super.setQuotaUser(quotaUser);
        }
        @Override
        public Delete setUploadType(java.lang.String uploadType) {
          return (Delete) super.setUploadType(uploadType);
        }
        @Override
        public Delete setUploadProtocol(java.lang.String uploadProtocol) {
          return (Delete) super.setUploadProtocol(uploadProtocol);
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /**
         * Required. The ID of the DV360 advertiser to which the parent negative keyword list
         * belongs.
         */
        public Delete setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /**
         * Required. The ID of the parent negative keyword list to which the negative keyword
         * belongs.
         */
        @com.google.api.client.util.Key
        private java.lang.Long negativeKeywordListId;
        /** Required. The ID of the parent negative keyword list to which the negative keyword belongs.
         */
        public java.lang.Long getNegativeKeywordListId() {
          return negativeKeywordListId;
        }
        /**
         * Required. The ID of the parent negative keyword list to which the negative keyword
         * belongs.
         */
        public Delete setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
          this.negativeKeywordListId = negativeKeywordListId;
          return this;
        }
        /** Required. The keyword value of the negative keyword to delete. */
        @com.google.api.client.util.Key
        private java.lang.String keywordValue;
        /** Required. The keyword value of the negative keyword to delete.
         */
        public java.lang.String getKeywordValue() {
          return keywordValue;
        }
        /** Required. The keyword value of the negative keyword to delete. */
        public Delete setKeywordValue(java.lang.String keywordValue) {
          // Same path-safety check as the constructor (skipped when pattern checks are suppressed).
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(KEYWORD_VALUE_PATTERN.matcher(keywordValue).matches(),
                "Parameter keywordValue must conform to the pattern " +
                "^[^/]+$");
          }
          this.keywordValue = keywordValue;
          return this;
        }
        // Generic catch-all: sets an arbitrary parameter by name via the superclass.
        @Override
        public Delete set(String parameterName, Object value) {
          return (Delete) super.set(parameterName, value);
        }
      }
/**
 * Builds a request that lists the negative keywords contained in a negative keyword list
 * (method "negativeKeywords.list").
 *
 * Optional parameters may be set on the returned request before invoking the remote
 * operation via {@link List#execute()}.
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
 * @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the requested negative keywords
 *        belong.
 * @return the initialized request
 */
public List list(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) throws java.io.IOException {
  List request = new List(advertiserId, negativeKeywordListId);
  initialize(request);
  return request;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListNegativeKeywordsResponse> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/negativeKeywordLists/{+negativeKeywordListId}/negativeKeywords";
// NOTE(review): these two patterns appear unused in this class — both path parameters are
// Longs and are never pattern-checked; presumably retained for generator parity. Confirm.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern NEGATIVE_KEYWORD_LIST_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Lists negative keywords in a negative keyword list.
 *
 * Create a request for the method "negativeKeywords.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
 * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
 * @param negativeKeywordListId Required. The ID of the parent negative keyword list to which the requested negative keywords
 *        belong.
 * @since 1.13
 */
protected List(java.lang.Long advertiserId, java.lang.Long negativeKeywordListId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListNegativeKeywordsResponse.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.negativeKeywordListId = com.google.api.client.util.Preconditions.checkNotNull(negativeKeywordListId, "Required parameter negativeKeywordListId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
 * Required. The ID of the DV360 advertiser to which the parent negative keyword list
 * belongs.
 */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the DV360 advertiser to which the parent negative keyword list belongs.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
 * Required. The ID of the DV360 advertiser to which the parent negative keyword list
 * belongs.
 */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * Required. The ID of the parent negative keyword list to which the requested negative
 * keywords belong.
 */
@com.google.api.client.util.Key
private java.lang.Long negativeKeywordListId;
/** Required. The ID of the parent negative keyword list to which the requested negative keywords
 belong.
 */
public java.lang.Long getNegativeKeywordListId() {
return negativeKeywordListId;
}
/**
 * Required. The ID of the parent negative keyword list to which the requested negative
 * keywords belong.
 */
public List setNegativeKeywordListId(java.lang.Long negativeKeywordListId) {
this.negativeKeywordListId = negativeKeywordListId;
return this;
}
/**
 * Allows filtering by negative keyword fields. Supported syntax: * Filter expressions for
 * negative keyword currently can only contain at most one * restriction. * A restriction
 * has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
 * Supported fields: - `keywordValue` Examples: * All negative keywords for which the
 * keyword value contains "google": `keywordValue : "google"`
 */
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by negative keyword fields. Supported syntax: * Filter expressions for negative
 keyword currently can only contain at most one * restriction. * A restriction has the form of
 `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: -
 `keywordValue` Examples: * All negative keywords for which the keyword value contains "google":
 `keywordValue : "google"`
 */
public java.lang.String getFilter() {
return filter;
}
/**
 * Allows filtering by negative keyword fields. Supported syntax: * Filter expressions for
 * negative keyword currently can only contain at most one * restriction. * A restriction
 * has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
 * Supported fields: - `keywordValue` Examples: * All negative keywords for which the
 * keyword value contains "google": `keywordValue : "google"`
 */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
 * Field by which to sort the list. Acceptable values are: * `keywordValue` (default) The
 * default sorting order is ascending. To specify descending order for a field, a suffix "
 * desc" should be added to the field name. Example: `keywordValue desc`.
 */
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `keywordValue` (default) The default
 sorting order is ascending. To specify descending order for a field, a suffix " desc" should be
 added to the field name. Example: `keywordValue desc`.
 */
public java.lang.String getOrderBy() {
return orderBy;
}
/**
 * Field by which to sort the list. Acceptable values are: * `keywordValue` (default) The
 * default sorting order is ascending. To specify descending order for a field, a suffix "
 * desc" should be added to the field name. Example: `keywordValue desc`.
 */
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to
 * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
 error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to
 * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
 */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListNegativeKeywords`
 * method. If not specified, the first page of results will be returned.
 */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
 next_page_token returned from the previous call to `ListNegativeKeywords` method. If not specified,
 the first page of results will be returned.
 */
public java.lang.String getPageToken() {
return pageToken;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListNegativeKeywords`
 * method. If not specified, the first page of results will be returned.
 */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
/**
 * Returns an accessor for building requests against the TargetingTypes collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.TargetingTypes.List request = displayvideo.targetingTypes().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public TargetingTypes targetingTypes() {
  TargetingTypes collection = new TargetingTypes();
  return collection;
}
/**
* The "targetingTypes" collection of methods.
*/
public class TargetingTypes {
/**
 * Returns an accessor for building requests against the AssignedTargetingOptions collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.AssignedTargetingOptions.List request = displayvideo.assignedTargetingOptions().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public AssignedTargetingOptions assignedTargetingOptions() {
  AssignedTargetingOptions collection = new AssignedTargetingOptions();
  return collection;
}
/**
* The "assignedTargetingOptions" collection of methods.
*/
public class AssignedTargetingOptions {
/**
 * Builds a request that assigns a targeting option to an advertiser (method
 * "assignedTargetingOptions.create"). The assigned targeting option is returned on success.
 *
 * Optional parameters may be set on the returned request before invoking the remote
 * operation via {@link Create#execute()}.
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
 * @return the initialized request
 */
public Create create(java.lang.Long advertiserId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) throws java.io.IOException {
  Create request = new Create(advertiserId, targetingType, content);
  initialize(request);
  return request;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
// NOTE(review): ADVERTISER_ID_PATTERN appears unused — the Long advertiserId is never
// pattern-checked; presumably retained for generator parity. Confirm.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Assigns a targeting option to an advertiser. Returns the assigned targeting option if
 * successful.
 *
 * Create a request for the method "assignedTargetingOptions.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
 * <p> {@link
 * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
 * @since 1.13
 */
protected Create(java.lang.Long advertiserId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser. */
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public java.lang.String getTargetingType() {
return targetingType;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public Create setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
 * Builds a request that deletes an assigned targeting option from an advertiser (method
 * "assignedTargetingOptions.delete").
 *
 * Optional parameters may be set on the returned request before invoking the remote
 * operation via {@link Delete#execute()}.
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
 * @return the initialized request
 */
public Delete delete(java.lang.Long advertiserId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
  Delete request = new Delete(advertiserId, targetingType, assignedTargetingOptionId);
  initialize(request);
  return request;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
// NOTE(review): ADVERTISER_ID_PATTERN appears unused — the Long advertiserId is never
// pattern-checked; presumably retained for generator parity. Confirm.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Deletes an assigned targeting option from an advertiser.
 *
 * Create a request for the method "assignedTargetingOptions.delete".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
 * <p> {@link
 * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
 * @since 1.13
 */
protected Delete(java.lang.Long advertiserId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
"Parameter assignedTargetingOptionId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser. */
public Delete setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public java.lang.String getTargetingType() {
return targetingType;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public Delete setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/** Required. The ID of the assigned targeting option to delete. */
@com.google.api.client.util.Key
private java.lang.String assignedTargetingOptionId;
/** Required. The ID of the assigned targeting option to delete.
 */
public java.lang.String getAssignedTargetingOptionId() {
return assignedTargetingOptionId;
}
/** Required. The ID of the assigned targeting option to delete. */
public Delete setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
"Parameter assignedTargetingOptionId must conform to the pattern " +
"^[^/]+$");
}
this.assignedTargetingOptionId = assignedTargetingOptionId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
 * Builds a request that fetches a single targeting option assigned to an advertiser (method
 * "assignedTargetingOptions.get").
 *
 * Optional parameters may be set on the returned request before invoking the remote
 * operation via {@link Get#execute()}.
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this advertiser that identifies the assigned
 *        targeting option being requested.
 * @return the initialized request
 */
public Get get(java.lang.Long advertiserId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
  Get request = new Get(advertiserId, targetingType, assignedTargetingOptionId);
  initialize(request);
  return request;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
// NOTE(review): ADVERTISER_ID_PATTERN appears unused — the Long advertiserId is never
// pattern-checked; presumably retained for generator parity. Confirm.
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Gets a single targeting option assigned to an advertiser.
 *
 * Create a request for the method "assignedTargetingOptions.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
 * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this advertiser that identifies the assigned
 *        targeting option being requested.
 * @since 1.13
 */
protected Get(java.lang.Long advertiserId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
"Parameter assignedTargetingOptionId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
 `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public java.lang.String getTargetingType() {
return targetingType;
}
/**
 * Required. Identifies the type of this assigned targeting option. Supported targeting
 * types: * `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 * `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 */
public Get setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/**
 * Required. An identifier unique to the targeting type in this advertiser that identifies
 * the assigned targeting option being requested.
 */
@com.google.api.client.util.Key
private java.lang.String assignedTargetingOptionId;
/** Required. An identifier unique to the targeting type in this advertiser that identifies the
 assigned targeting option being requested.
 */
public java.lang.String getAssignedTargetingOptionId() {
return assignedTargetingOptionId;
}
/**
 * Required. An identifier unique to the targeting type in this advertiser that identifies
 * the assigned targeting option being requested.
 */
public Get setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
"Parameter assignedTargetingOptionId must conform to the pattern " +
"^[^/]+$");
}
this.assignedTargetingOptionId = assignedTargetingOptionId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
 * Builds a request that lists the targeting options assigned to an advertiser (method
 * "assignedTargetingOptions.list").
 *
 * Optional parameters may be set on the returned request before invoking the remote
 * operation via {@link List#execute()}.
 *
 * @param advertiserId Required. The ID of the advertiser.
 * @param targetingType Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
 *        `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
 *        `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
 * @return the initialized request
 */
public List list(java.lang.Long advertiserId, java.lang.String targetingType) throws java.io.IOException {
  List request = new List(advertiserId, targetingType);
  initialize(request);
  return request;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListAdvertiserAssignedTargetingOptionsResponse> {
private static final String REST_PATH = "v1/advertisers/{+advertiserId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
private final java.util.regex.Pattern ADVERTISER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Lists the targeting options assigned to an advertiser.
*
* Create a request for the method "assignedTargetingOptions.list".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param advertiserId Required. The ID of the advertiser.
* @param targetingType Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
* `TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
* `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
* @since 1.13
*/
protected List(java.lang.Long advertiserId, java.lang.String targetingType) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListAdvertiserAssignedTargetingOptionsResponse.class);
this.advertiserId = com.google.api.client.util.Preconditions.checkNotNull(advertiserId, "Required parameter advertiserId must be specified.");
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the advertiser. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The ID of the advertiser.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The ID of the advertiser. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Required. Identifies the type of assigned targeting options to list. Supported
* targeting types: * `TARGETING_TYPE_CHANNEL` *
* `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
* `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
*/
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
`TARGETING_TYPE_CHANNEL` * `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
`TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
*/
public java.lang.String getTargetingType() {
return targetingType;
}
/**
* Required. Identifies the type of assigned targeting options to list. Supported
* targeting types: * `TARGETING_TYPE_CHANNEL` *
* `TARGETING_TYPE_DIGITAL_CONTENT_LABEL_EXCLUSION` *
* `TARGETING_TYPE_SENSITIVE_CATEGORY_EXCLUSION`
*/
public List setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/**
* Allows filtering by assigned targeting option properties. Supported syntax: * Filter
* expressions are made up of one or more restrictions. * Restrictions can be combined by
* the logical operator `OR`. * A restriction has the form of `{field} {operator}
* {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID 123456
* `assignedTargetingOptionId="123456"` The length of this field should be no more than
* 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by assigned targeting option properties. Supported syntax: * Filter expressions
are made up of one or more restrictions. * Restrictions can be combined by the logical operator
`OR`. * A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS
(=)`. * Supported fields: - `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID
123456 `assignedTargetingOptionId="123456"` The length of this field should be no more than 500
characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by assigned targeting option properties. Supported syntax: * Filter
* expressions are made up of one or more restrictions. * Restrictions can be combined by
* the logical operator `OR`. * A restriction has the form of `{field} {operator}
* {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID 123456
* `assignedTargetingOptionId="123456"` The length of this field should be no more than
* 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
* (default) The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example:
* `assignedTargetingOptionId desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId` (default) The
default sorting order is ascending. To specify descending order for a field, a suffix "desc" should
be added to the field name. Example: `assignedTargetingOptionId desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
* (default) The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example:
* `assignedTargetingOptionId desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to
* `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to
* `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListAdvertiserAssignedTargetingOptions` method. If not specified, the first page of
* results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListAdvertiserAssignedTargetingOptions` method.
If not specified, the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListAdvertiserAssignedTargetingOptions` method. If not specified, the first page of
* results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
}
  /**
   * An accessor for creating requests from the CombinedAudiences collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
   *   {@code DisplayVideo.CombinedAudiences.List request = displayvideo.combinedAudiences().list(parameters ...)}
   * </pre>
   *
   * <p>Each call returns a fresh, stateless collection object; requests are built via its
   * {@code get(...)}/{@code list()} factory methods.</p>
   *
   * @return the resource collection
   */
  public CombinedAudiences combinedAudiences() {
    return new CombinedAudiences();
  }
/**
* The "combinedAudiences" collection of methods.
*/
public class CombinedAudiences {
    /**
     * Gets a combined audience.
     *
     * Create a request for the method "combinedAudiences.get".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Get#execute()} method to invoke the remote operation.
     *
     * @param combinedAudienceId Required. The ID of the combined audience to fetch.
     * @return the request
     */
    public Get get(java.lang.Long combinedAudienceId) throws java.io.IOException {
      Get result = new Get(combinedAudienceId);
      // Apply the service-level request initializer before returning the request.
      initialize(result);
      return result;
    }
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.CombinedAudience> {
private static final String REST_PATH = "v1/combinedAudiences/{+combinedAudienceId}";
private final java.util.regex.Pattern COMBINED_AUDIENCE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a combined audience.
*
* Create a request for the method "combinedAudiences.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param combinedAudienceId Required. The ID of the combined audience to fetch.
* @since 1.13
*/
protected Get(java.lang.Long combinedAudienceId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.CombinedAudience.class);
this.combinedAudienceId = com.google.api.client.util.Preconditions.checkNotNull(combinedAudienceId, "Required parameter combinedAudienceId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the combined audience to fetch. */
@com.google.api.client.util.Key
private java.lang.Long combinedAudienceId;
/** Required. The ID of the combined audience to fetch.
*/
public java.lang.Long getCombinedAudienceId() {
return combinedAudienceId;
}
/** Required. The ID of the combined audience to fetch. */
public Get setCombinedAudienceId(java.lang.Long combinedAudienceId) {
this.combinedAudienceId = combinedAudienceId;
return this;
}
/** The ID of the advertiser that has access to the fetched combined audience. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the fetched combined audience.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the advertiser that has access to the fetched combined audience. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** The ID of the partner that has access to the fetched combined audience. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the fetched combined audience.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the partner that has access to the fetched combined audience. */
public Get setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
    /**
     * Lists combined audiences. The order is defined by the order_by parameter.
     *
     * Create a request for the method "combinedAudiences.list".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link List#execute()} method to invoke the remote operation.
     *
     * @return the request
     */
    public List list() throws java.io.IOException {
      List result = new List();
      // Apply the service-level request initializer before returning the request.
      initialize(result);
      return result;
    }
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListCombinedAudiencesResponse> {

      private static final String REST_PATH = "v1/combinedAudiences";

      /**
       * Lists combined audiences. The order is defined by the order_by parameter.
       *
       * Create a request for the method "combinedAudiences.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @since 1.13
       */
      protected List() {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListCombinedAudiencesResponse.class);
      }

      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }

      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }

      // Standard query-parameter setters, overridden only to narrow the return type so
      // fluent chains stay typed as List.
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }

      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }

      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }

      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }

      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }

      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }

      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }

      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }

      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }

      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }

      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }

      // NOTE(review): presumably the caller supplies exactly one of advertiserId/partnerId to
      // identify the accessing entity — confirm against the Display & Video 360 API reference.
      /** The ID of the advertiser that has access to the fetched combined audiences. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;

      /** The ID of the advertiser that has access to the fetched combined audiences.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }

      /** The ID of the advertiser that has access to the fetched combined audiences. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }

      /**
       * Allows filtering by combined audience fields. Supported syntax: * Filter expressions for
       * combined audiences currently can only contain at most one restriction. * A restriction has
       * the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
       * Supported fields: - `displayName` Examples: * All combined audiences for which the display
       * name contains "Google": `displayName : "Google"`. The length of this field should be no
       * more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;

      /** Allows filtering by combined audience fields. Supported syntax: * Filter expressions for combined
       * audiences currently can only contain at most one restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: -
       * `displayName` Examples: * All combined audiences for which the display name contains "Google":
       * `displayName : "Google"`. The length of this field should be no more than 500 characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }

      /**
       * Allows filtering by combined audience fields. Supported syntax: * Filter expressions for
       * combined audiences currently can only contain at most one restriction. * A restriction has
       * the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
       * Supported fields: - `displayName` Examples: * All combined audiences for which the display
       * name contains "Google": `displayName : "Google"`. The length of this field should be no
       * more than 500 characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }

      /**
       * Field by which to sort the list. Acceptable values are: * `combinedAudienceId` (default) *
       * `displayName` The default sorting order is ascending. To specify descending order for a
       * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;

      /** Field by which to sort the list. Acceptable values are: * `combinedAudienceId` (default) *
       * `displayName` The default sorting order is ascending. To specify descending order for a field, a
       * suffix "desc" should be added to the field name. Example: `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }

      /**
       * Field by which to sort the list. Acceptable values are: * `combinedAudienceId` (default) *
       * `displayName` The default sorting order is ascending. To specify descending order for a
       * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }

      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;

      /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
       * error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }

      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }

      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCombinedAudiences` method.
       * If not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;

      /** A token identifying a page of results the server should return. Typically, this is the value of
       * next_page_token returned from the previous call to `ListCombinedAudiences` method. If not
       * specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }

      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCombinedAudiences` method.
       * If not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }

      /** The ID of the partner that has access to the fetched combined audiences. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;

      /** The ID of the partner that has access to the fetched combined audiences.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }

      /** The ID of the partner that has access to the fetched combined audiences. */
      public List setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }

      // Catch-all setter for unmodeled parameters; keeps the fluent List type.
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
}
  /**
   * An accessor for creating requests from the CustomBiddingAlgorithms collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
   *   {@code DisplayVideo.CustomBiddingAlgorithms.List request = displayvideo.customBiddingAlgorithms().list(parameters ...)}
   * </pre>
   *
   * <p>Each call returns a fresh, stateless collection object; requests are built via its
   * {@code get(...)}/{@code list()} factory methods.</p>
   *
   * @return the resource collection
   */
  public CustomBiddingAlgorithms customBiddingAlgorithms() {
    return new CustomBiddingAlgorithms();
  }
/**
* The "customBiddingAlgorithms" collection of methods.
*/
public class CustomBiddingAlgorithms {
    /**
     * Gets a custom bidding algorithm.
     *
     * Create a request for the method "customBiddingAlgorithms.get".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Get#execute()} method to invoke the remote operation.
     *
     * @param customBiddingAlgorithmId Required. The ID of the custom bidding algorithm to fetch.
     * @return the request
     */
    public Get get(java.lang.Long customBiddingAlgorithmId) throws java.io.IOException {
      Get result = new Get(customBiddingAlgorithmId);
      // Apply the service-level request initializer before returning the request.
      initialize(result);
      return result;
    }
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.CustomBiddingAlgorithm> {
private static final String REST_PATH = "v1/customBiddingAlgorithms/{+customBiddingAlgorithmId}";
private final java.util.regex.Pattern CUSTOM_BIDDING_ALGORITHM_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a custom bidding algorithm.
*
* Create a request for the method "customBiddingAlgorithms.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param customBiddingAlgorithmId Required. The ID of the custom bidding algorithm to fetch.
* @since 1.13
*/
protected Get(java.lang.Long customBiddingAlgorithmId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.CustomBiddingAlgorithm.class);
this.customBiddingAlgorithmId = com.google.api.client.util.Preconditions.checkNotNull(customBiddingAlgorithmId, "Required parameter customBiddingAlgorithmId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the custom bidding algorithm to fetch. */
@com.google.api.client.util.Key
private java.lang.Long customBiddingAlgorithmId;
/** Required. The ID of the custom bidding algorithm to fetch.
*/
public java.lang.Long getCustomBiddingAlgorithmId() {
return customBiddingAlgorithmId;
}
/** Required. The ID of the custom bidding algorithm to fetch. */
public Get setCustomBiddingAlgorithmId(java.lang.Long customBiddingAlgorithmId) {
this.customBiddingAlgorithmId = customBiddingAlgorithmId;
return this;
}
/** The ID of the DV360 partner that has access to the custom bidding algorithm. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the DV360 partner that has access to the custom bidding algorithm.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the DV360 partner that has access to the custom bidding algorithm. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** The ID of the DV360 partner that has access to the custom bidding algorithm. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the DV360 partner that has access to the custom bidding algorithm.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the DV360 partner that has access to the custom bidding algorithm. */
public Get setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
    /**
     * Lists custom bidding algorithms that are accessible to the current user and can be used in
     * bidding strategies. The order is defined by the order_by parameter.
     *
     * Create a request for the method "customBiddingAlgorithms.list".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link List#execute()} method to invoke the remote operation.
     *
     * @return the request
     */
    public List list() throws java.io.IOException {
      List result = new List();
      // Apply the service-level request initializer before returning the request.
      initialize(result);
      return result;
    }
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListCustomBiddingAlgorithmsResponse> {
private static final String REST_PATH = "v1/customBiddingAlgorithms";
      /**
       * Lists custom bidding algorithms that are accessible to the current user and can be used in
       * bidding strategies. The order is defined by the order_by parameter.
       *
       * Create a request for the method "customBiddingAlgorithms.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @since 1.13
       */
      protected List() {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListCustomBiddingAlgorithmsResponse.class);
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      // Standard query-parameter setters, overridden only to narrow the return type so
      // fluent chains stay typed as List.
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      // --- Query parameters for customBiddingAlgorithms.list ---
      /** The ID of the DV360 advertiser that has access to the custom bidding algorithm. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the DV360 advertiser that has access to the custom bidding algorithm.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the DV360 advertiser that has access to the custom bidding algorithm. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by custom bidding algorithm fields. Supported syntax: * Filter expressions
       * are made up of one or more restrictions. * Restrictions can be combined by `AND`. A
       * sequence of restrictions * implicitly uses `AND`. * A restriction has the form of `{field}
       * {operator} {value}`. * The operator must be `CONTAINS (:)` or `EQUALS (=)`. * The operator
       * must be `CONTAINS (:)` for the following field: - `displayName` * The operator must be
       * `EQUALS (=)` for the following field: - `customBiddingAlgorithmType` * For `displayName`,
       * the value is a string. We return all custom bidding algorithms whose display_name contains
       * such string. * For `customBiddingAlgorithmType`, the value is a string. We return all
       * algorithms whose custom_bidding_algorithm_type is equal to the given type. Examples: * All
       * custom bidding algorithms for which the display name contains "politics":
       * `displayName:politics`. * All custom bidding algorithms for which the type is
       * "SCRIPT_BASED": `customBiddingAlgorithmType=SCRIPT_BASED` The length of this field should
       * be no more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /** Allows filtering by custom bidding algorithm fields. Supported syntax: * Filter expressions are
     made up of one or more restrictions. * Restrictions can be combined by `AND`. A sequence of
     restrictions * implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`.
     * The operator must be `CONTAINS (:)` or `EQUALS (=)`. * The operator must be `CONTAINS (:)` for
     the following field: - `displayName` * The operator must be `EQUALS (=)` for the following field: -
     `customBiddingAlgorithmType` * For `displayName`, the value is a string. We return all custom
     bidding algorithms whose display_name contains such string. * For `customBiddingAlgorithmType`, the
     value is a string. We return all algorithms whose custom_bidding_algorithm_type is equal to the
     given type. Examples: * All custom bidding algorithms for which the display name contains
     "politics": `displayName:politics`. * All custom bidding algorithms for which the type is
     "SCRIPT_BASED": `customBiddingAlgorithmType=SCRIPT_BASED` The length of this field should be no
     more than 500 characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by custom bidding algorithm fields. Supported syntax: * Filter expressions
       * are made up of one or more restrictions. * Restrictions can be combined by `AND`. A
       * sequence of restrictions * implicitly uses `AND`. * A restriction has the form of `{field}
       * {operator} {value}`. * The operator must be `CONTAINS (:)` or `EQUALS (=)`. * The operator
       * must be `CONTAINS (:)` for the following field: - `displayName` * The operator must be
       * `EQUALS (=)` for the following field: - `customBiddingAlgorithmType` * For `displayName`,
       * the value is a string. We return all custom bidding algorithms whose display_name contains
       * such string. * For `customBiddingAlgorithmType`, the value is a string. We return all
       * algorithms whose custom_bidding_algorithm_type is equal to the given type. Examples: * All
       * custom bidding algorithms for which the display name contains "politics":
       * `displayName:politics`. * All custom bidding algorithms for which the type is
       * "SCRIPT_BASED": `customBiddingAlgorithmType=SCRIPT_BASED` The length of this field should
       * be no more than 500 characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) The
       * default sorting order is ascending. To specify descending order for a field, a suffix
       * "desc" should be added to the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /** Field by which to sort the list. Acceptable values are: * `displayName` (default) The default
     sorting order is ascending. To specify descending order for a field, a suffix "desc" should be
     added to the field name. Example: `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) The
       * default sorting order is ascending. To specify descending order for a field, a suffix
       * "desc" should be added to the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
     error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCustomBiddingAlgorithms`
       * method. If not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListCustomBiddingAlgorithms` method. If not
     specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCustomBiddingAlgorithms`
       * method. If not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      /** The ID of the DV360 partner that has access to the custom bidding algorithm. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the DV360 partner that has access to the custom bidding algorithm.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the DV360 partner that has access to the custom bidding algorithm. */
      public List setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
}
}
  /**
   * An accessor for creating requests from the CustomLists collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
   *   {@code DisplayVideo.CustomLists.List request = displayvideo.customLists().list(parameters ...)}
   * </pre>
   *
   * <p>Each invocation constructs a new {@code CustomLists} instance.</p>
   *
   * @return the resource collection
   */
  public CustomLists customLists() {
    return new CustomLists();
  }
  /**
   * The "customLists" collection of methods.
   */
  public class CustomLists {
    /**
     * Gets a custom list.
     *
     * Create a request for the method "customLists.get".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Get#execute()} method to invoke the remote operation.
     *
     * @param customListId Required. The ID of the custom list to fetch.
     * @return the request
     */
    public Get get(java.lang.Long customListId) throws java.io.IOException {
      Get result = new Get(customListId);
      initialize(result);
      return result;
    }
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.CustomList> {
      private static final String REST_PATH = "v1/customLists/{+customListId}";
      // NOTE(review): not referenced in the code visible here — presumably retained for
      // client-side validation of customListId; confirm against the code generator before removing.
      private final java.util.regex.Pattern CUSTOM_LIST_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a custom list.
       *
       * Create a request for the method "customLists.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param customListId Required. The ID of the custom list to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long customListId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.CustomList.class);
        this.customListId = com.google.api.client.util.Preconditions.checkNotNull(customListId, "Required parameter customListId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the custom list to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long customListId;
      /** Required. The ID of the custom list to fetch.
       */
      public java.lang.Long getCustomListId() {
        return customListId;
      }
      /** Required. The ID of the custom list to fetch. */
      public Get setCustomListId(java.lang.Long customListId) {
        this.customListId = customListId;
        return this;
      }
      /** The ID of the DV360 advertiser that has access to the fetched custom lists. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the DV360 advertiser that has access to the fetched custom lists.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the DV360 advertiser that has access to the fetched custom lists. */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
    /**
     * Lists custom lists. The order is defined by the order_by parameter.
     *
     * Create a request for the method "customLists.list".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link List#execute()} method to invoke the remote operation.
     *
     * @return the request
     */
    public List list() throws java.io.IOException {
      List result = new List();
      initialize(result);
      return result;
    }
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListCustomListsResponse> {
      private static final String REST_PATH = "v1/customLists";
      /**
       * Lists custom lists. The order is defined by the order_by parameter.
       *
       * Create a request for the method "customLists.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @since 1.13
       */
      protected List() {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListCustomListsResponse.class);
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the DV360 advertiser that has access to the fetched custom lists. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the DV360 advertiser that has access to the fetched custom lists.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the DV360 advertiser that has access to the fetched custom lists. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by custom list fields. Supported syntax: * Filter expressions for custom
       * lists currently can only contain at most one restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: -
       * `displayName` Examples: * All custom lists for which the display name contains "Google":
       * `displayName : "Google"`. The length of this field should be no more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /** Allows filtering by custom list fields. Supported syntax: * Filter expressions for custom lists
     currently can only contain at most one restriction. * A restriction has the form of `{field}
     {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: - `displayName`
     Examples: * All custom lists for which the display name contains "Google": `displayName :
     "Google"`. The length of this field should be no more than 500 characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by custom list fields. Supported syntax: * Filter expressions for custom
       * lists currently can only contain at most one restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: -
       * `displayName` Examples: * All custom lists for which the display name contains "Google":
       * `displayName : "Google"`. The length of this field should be no more than 500 characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `customListId` (default) *
       * `displayName` The default sorting order is ascending. To specify descending order for a
       * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /** Field by which to sort the list. Acceptable values are: * `customListId` (default) * `displayName`
     The default sorting order is ascending. To specify descending order for a field, a suffix "desc"
     should be added to the field name. Example: `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `customListId` (default) *
       * `displayName` The default sorting order is ascending. To specify descending order for a
       * field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
     error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCustomLists` method. If
       * not specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListCustomLists` method. If not specified, the
     first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListCustomLists` method. If
       * not specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
  }
  /**
   * An accessor for creating requests from the FirstAndThirdPartyAudiences collection.
   *
   * <p>The typical use is:</p>
   * <pre>
   *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
   *   {@code DisplayVideo.FirstAndThirdPartyAudiences.List request = displayvideo.firstAndThirdPartyAudiences().list(parameters ...)}
   * </pre>
   *
   * <p>Each invocation constructs a new {@code FirstAndThirdPartyAudiences} instance.</p>
   *
   * @return the resource collection
   */
  public FirstAndThirdPartyAudiences firstAndThirdPartyAudiences() {
    return new FirstAndThirdPartyAudiences();
  }
/**
* The "firstAndThirdPartyAudiences" collection of methods.
*/
public class FirstAndThirdPartyAudiences {
    /**
     * Gets a first and third party audience.
     *
     * Create a request for the method "firstAndThirdPartyAudiences.get".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link Get#execute()} method to invoke the remote operation.
     *
     * @param firstAndThirdPartyAudienceId Required. The ID of the first and third party audience to fetch.
     * @return the request
     */
    public Get get(java.lang.Long firstAndThirdPartyAudienceId) throws java.io.IOException {
      Get result = new Get(firstAndThirdPartyAudienceId);
      initialize(result);
      return result;
    }
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.FirstAndThirdPartyAudience> {
      private static final String REST_PATH = "v1/firstAndThirdPartyAudiences/{+firstAndThirdPartyAudienceId}";
      // NOTE(review): not referenced in the code visible here — presumably retained for
      // client-side validation of the path parameter; confirm against the code generator before removing.
      private final java.util.regex.Pattern FIRST_AND_THIRD_PARTY_AUDIENCE_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a first and third party audience.
       *
       * Create a request for the method "firstAndThirdPartyAudiences.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param firstAndThirdPartyAudienceId Required. The ID of the first and third party audience to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long firstAndThirdPartyAudienceId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.FirstAndThirdPartyAudience.class);
        this.firstAndThirdPartyAudienceId = com.google.api.client.util.Preconditions.checkNotNull(firstAndThirdPartyAudienceId, "Required parameter firstAndThirdPartyAudienceId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the first and third party audience to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long firstAndThirdPartyAudienceId;
      /** Required. The ID of the first and third party audience to fetch.
       */
      public java.lang.Long getFirstAndThirdPartyAudienceId() {
        return firstAndThirdPartyAudienceId;
      }
      /** Required. The ID of the first and third party audience to fetch. */
      public Get setFirstAndThirdPartyAudienceId(java.lang.Long firstAndThirdPartyAudienceId) {
        this.firstAndThirdPartyAudienceId = firstAndThirdPartyAudienceId;
        return this;
      }
      /**
       * The ID of the advertiser that has access to the fetched first and third party audience.
       */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that has access to the fetched first and third party audience.
       */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /**
       * The ID of the advertiser that has access to the fetched first and third party audience.
       */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** The ID of the partner that has access to the fetched first and third party audience. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that has access to the fetched first and third party audience.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that has access to the fetched first and third party audience. */
      public Get setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
    /**
     * Lists first and third party audiences. The order is defined by the order_by parameter.
     *
     * Create a request for the method "firstAndThirdPartyAudiences.list".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any optional
     * parameters, call the {@link List#execute()} method to invoke the remote operation.
     *
     * @return the request
     */
    public List list() throws java.io.IOException {
      List result = new List();
      initialize(result);
      return result;
    }
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListFirstAndThirdPartyAudiencesResponse> {
private static final String REST_PATH = "v1/firstAndThirdPartyAudiences";
/**
* Lists first and third party audiences. The order is defined by the order_by parameter.
*
* Create a request for the method "firstAndThirdPartyAudiences.list".
*
       * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListFirstAndThirdPartyAudiencesResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* The ID of the advertiser that has access to the fetched first and third party audiences.
*/
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the fetched first and third party audiences.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
* The ID of the advertiser that has access to the fetched first and third party audiences.
*/
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Allows filtering by first and third party audience fields. Supported syntax: * Filter
* expressions for first and third party audiences currently can only contain at most one
* restriction. * A restriction has the form of `{field} {operator} {value}`. * The operator
* must be `CONTAINS (:)`. * Supported fields: - `displayName` Examples: * All first and third
* party audiences for which the display name contains "Google": `displayName : "Google"`. The
* length of this field should be no more than 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by first and third party audience fields. Supported syntax: * Filter expressions
for first and third party audiences currently can only contain at most one restriction. * A
restriction has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
Supported fields: - `displayName` Examples: * All first and third party audiences for which the
display name contains "Google": `displayName : "Google"`. The length of this field should be no
more than 500 characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by first and third party audience fields. Supported syntax: * Filter
* expressions for first and third party audiences currently can only contain at most one
* restriction. * A restriction has the form of `{field} {operator} {value}`. * The operator
* must be `CONTAINS (:)`. * Supported fields: - `displayName` Examples: * All first and third
* party audiences for which the display name contains "Google": `displayName : "Google"`. The
* length of this field should be no more than 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `firstAndThirdPartyAudienceId`
* (default) * `displayName` The default sorting order is ascending. To specify descending
* order for a field, a suffix "desc" should be added to the field name. Example: `displayName
* desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `firstAndThirdPartyAudienceId` (default)
* `displayName` The default sorting order is ascending. To specify descending order for a field, a
suffix "desc" should be added to the field name. Example: `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `firstAndThirdPartyAudienceId`
* (default) * `displayName` The default sorting order is ascending. To specify descending
* order for a field, a suffix "desc" should be added to the field name. Example: `displayName
* desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListFirstAndThirdPartyAudiences` method. If not specified, the first page of results will
* be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListFirstAndThirdPartyAudiences` method. If not
specified, the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListFirstAndThirdPartyAudiences` method. If not specified, the first page of results will
* be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/** The ID of the partner that has access to the fetched first and third party audiences. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the fetched first and third party audiences.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the partner that has access to the fetched first and third party audiences. */
public List setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
          /** Sets an arbitrary query parameter by name, delegating to the superclass and narrowing the return type for chaining. */
          @Override
          public List set(String parameterName, Object value) {
            return (List) super.set(parameterName, value);
          }
}
}
    /**
     * An accessor for creating requests from the FloodlightGroups collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     * {@code DisplayVideo displayvideo = new DisplayVideo(...);}
     * {@code DisplayVideo.FloodlightGroups.List request = displayvideo.floodlightGroups().list(parameters ...)}
     * </pre>
     *
     * <p>Note: a fresh {@code FloodlightGroups} instance is created on every call; the accessor
     * holds no state.</p>
     *
     * @return the resource collection
     */
    public FloodlightGroups floodlightGroups() {
      return new FloodlightGroups();
    }
/**
* The "floodlightGroups" collection of methods.
*/
public class FloodlightGroups {
/**
* Gets a Floodlight group.
*
* Create a request for the method "floodlightGroups.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param floodlightGroupId Required. The ID of the Floodlight group to fetch.
* @return the request
*/
public Get get(java.lang.Long floodlightGroupId) throws java.io.IOException {
Get result = new Get(floodlightGroupId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.FloodlightGroup> {
private static final String REST_PATH = "v1/floodlightGroups/{+floodlightGroupId}";
private final java.util.regex.Pattern FLOODLIGHT_GROUP_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a Floodlight group.
*
* Create a request for the method "floodlightGroups.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param floodlightGroupId Required. The ID of the Floodlight group to fetch.
* @since 1.13
*/
protected Get(java.lang.Long floodlightGroupId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.FloodlightGroup.class);
this.floodlightGroupId = com.google.api.client.util.Preconditions.checkNotNull(floodlightGroupId, "Required parameter floodlightGroupId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the Floodlight group to fetch. */
@com.google.api.client.util.Key
private java.lang.Long floodlightGroupId;
/** Required. The ID of the Floodlight group to fetch.
*/
public java.lang.Long getFloodlightGroupId() {
return floodlightGroupId;
}
/** Required. The ID of the Floodlight group to fetch. */
public Get setFloodlightGroupId(java.lang.Long floodlightGroupId) {
this.floodlightGroupId = floodlightGroupId;
return this;
}
/** Required. The partner context by which the Floodlight group is being accessed. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** Required. The partner context by which the Floodlight group is being accessed.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** Required. The partner context by which the Floodlight group is being accessed. */
public Get setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Updates an existing Floodlight group. Returns the updated Floodlight group if successful.
*
* Create a request for the method "floodlightGroups.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param floodlightGroupId Output only. The unique ID of the Floodlight group. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.FloodlightGroup}
* @return the request
*/
public Patch patch(java.lang.Long floodlightGroupId, com.google.api.services.displayvideo.v1.model.FloodlightGroup content) throws java.io.IOException {
Patch result = new Patch(floodlightGroupId, content);
initialize(result);
return result;
}
public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.FloodlightGroup> {
private static final String REST_PATH = "v1/floodlightGroups/{floodlightGroupId}";
/**
* Updates an existing Floodlight group. Returns the updated Floodlight group if successful.
*
* Create a request for the method "floodlightGroups.patch".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
* <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param floodlightGroupId Output only. The unique ID of the Floodlight group. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.FloodlightGroup}
* @since 1.13
*/
protected Patch(java.lang.Long floodlightGroupId, com.google.api.services.displayvideo.v1.model.FloodlightGroup content) {
super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.FloodlightGroup.class);
this.floodlightGroupId = com.google.api.client.util.Preconditions.checkNotNull(floodlightGroupId, "Required parameter floodlightGroupId must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/** Output only. The unique ID of the Floodlight group. Assigned by the system. */
@com.google.api.client.util.Key
private java.lang.Long floodlightGroupId;
/** Output only. The unique ID of the Floodlight group. Assigned by the system.
*/
public java.lang.Long getFloodlightGroupId() {
return floodlightGroupId;
}
/** Output only. The unique ID of the Floodlight group. Assigned by the system. */
public Patch setFloodlightGroupId(java.lang.Long floodlightGroupId) {
this.floodlightGroupId = floodlightGroupId;
return this;
}
/** Required. The partner context by which the Floodlight group is being accessed. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** Required. The partner context by which the Floodlight group is being accessed.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** Required. The partner context by which the Floodlight group is being accessed. */
public Patch setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
/** Required. The mask to control which fields to update. */
@com.google.api.client.util.Key
private String updateMask;
/** Required. The mask to control which fields to update.
*/
public String getUpdateMask() {
return updateMask;
}
/** Required. The mask to control which fields to update. */
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
}
    /**
     * An accessor for creating requests from the GoogleAudiences collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     * {@code DisplayVideo displayvideo = new DisplayVideo(...);}
     * {@code DisplayVideo.GoogleAudiences.List request = displayvideo.googleAudiences().list(parameters ...)}
     * </pre>
     *
     * <p>Note: a fresh {@code GoogleAudiences} instance is created on every call; the accessor
     * holds no state.</p>
     *
     * @return the resource collection
     */
    public GoogleAudiences googleAudiences() {
      return new GoogleAudiences();
    }
/**
* The "googleAudiences" collection of methods.
*/
public class GoogleAudiences {
/**
* Gets a Google audience.
*
* Create a request for the method "googleAudiences.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param googleAudienceId Required. The ID of the Google audience to fetch.
* @return the request
*/
public Get get(java.lang.Long googleAudienceId) throws java.io.IOException {
Get result = new Get(googleAudienceId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.GoogleAudience> {
private static final String REST_PATH = "v1/googleAudiences/{+googleAudienceId}";
private final java.util.regex.Pattern GOOGLE_AUDIENCE_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a Google audience.
*
* Create a request for the method "googleAudiences.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param googleAudienceId Required. The ID of the Google audience to fetch.
* @since 1.13
*/
protected Get(java.lang.Long googleAudienceId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.GoogleAudience.class);
this.googleAudienceId = com.google.api.client.util.Preconditions.checkNotNull(googleAudienceId, "Required parameter googleAudienceId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the Google audience to fetch. */
@com.google.api.client.util.Key
private java.lang.Long googleAudienceId;
/** Required. The ID of the Google audience to fetch.
*/
public java.lang.Long getGoogleAudienceId() {
return googleAudienceId;
}
/** Required. The ID of the Google audience to fetch. */
public Get setGoogleAudienceId(java.lang.Long googleAudienceId) {
this.googleAudienceId = googleAudienceId;
return this;
}
/** The ID of the advertiser that has access to the fetched Google audience. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the fetched Google audience.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the advertiser that has access to the fetched Google audience. */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/** The ID of the partner that has access to the fetched Google audience. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the fetched Google audience.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the partner that has access to the fetched Google audience. */
public Get setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists Google audiences. The order is defined by the order_by parameter.
*
* Create a request for the method "googleAudiences.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListGoogleAudiencesResponse> {
private static final String REST_PATH = "v1/googleAudiences";
/**
* Lists Google audiences. The order is defined by the order_by parameter.
*
* Create a request for the method "googleAudiences.list".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListGoogleAudiencesResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The ID of the advertiser that has access to the fetched Google audiences. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the fetched Google audiences.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the advertiser that has access to the fetched Google audiences. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Allows filtering by Google audience fields. Supported syntax: * Filter expressions for
* Google audiences currently can only contain at most one restriction. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
* Supported fields: - `displayName` Examples: * All Google audiences for which the display
* name contains "Google": `displayName : "Google"`. The length of this field should be no
* more than 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by Google audience fields. Supported syntax: * Filter expressions for Google
audiences currently can only contain at most one restriction. * A restriction has the form of
`{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields: -
`displayName` Examples: * All Google audiences for which the display name contains "Google":
`displayName : "Google"`. The length of this field should be no more than 500 characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by Google audience fields. Supported syntax: * Filter expressions for
* Google audiences currently can only contain at most one restriction. * A restriction has
* the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. *
* Supported fields: - `displayName` Examples: * All Google audiences for which the display
* name contains "Google": `displayName : "Google"`. The length of this field should be no
* more than 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `googleAudienceId` (default) *
* `displayName` The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `googleAudienceId` (default) *
`displayName` The default sorting order is ascending. To specify descending order for a field, a
suffix "desc" should be added to the field name. Example: `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `googleAudienceId` (default) *
* `displayName` The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
* Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListGoogleAudiences` method.
* If not specified, the first page of results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListGoogleAudiences` method. If not specified,
the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListGoogleAudiences` method.
* If not specified, the first page of results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/** The ID of the partner that has access to the fetched Google audiences. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the fetched Google audiences.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the partner that has access to the fetched Google audiences. */
public List setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
    /**
     * An accessor for creating requests from the InventorySourceGroups collection.
     *
     * <p>The typical use is:</p>
     * <pre>
     * {@code DisplayVideo displayvideo = new DisplayVideo(...);}
     * {@code DisplayVideo.InventorySourceGroups.List request = displayvideo.inventorySourceGroups().list(parameters ...)}
     * </pre>
     *
     * <p>Note: a fresh {@code InventorySourceGroups} instance is created on every call; the
     * accessor holds no state.</p>
     *
     * @return the resource collection
     */
    public InventorySourceGroups inventorySourceGroups() {
      return new InventorySourceGroups();
    }
/**
* The "inventorySourceGroups" collection of methods.
*/
public class InventorySourceGroups {
/**
* Creates a new inventory source group. Returns the newly created inventory source group if
* successful.
*
* Create a request for the method "inventorySourceGroups.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param content the {@link com.google.api.services.displayvideo.v1.model.InventorySourceGroup}
* @return the request
*/
public Create create(com.google.api.services.displayvideo.v1.model.InventorySourceGroup content) throws java.io.IOException {
Create result = new Create(content);
initialize(result);
return result;
}
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InventorySourceGroup> {
        private static final String REST_PATH = "v1/inventorySourceGroups";
        /**
         * Creates a new inventory source group. Returns the newly created inventory source group if
         * successful.
         *
         * Create a request for the method "inventorySourceGroups.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param content the {@link com.google.api.services.displayvideo.v1.model.InventorySourceGroup}
         * @since 1.13
         */
        protected Create(com.google.api.services.displayvideo.v1.model.InventorySourceGroup content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.InventorySourceGroup.class);
        }
        // Chainable overrides below narrow the return type to Create for fluent call chains.
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }
        /**
         * The ID of the advertiser that owns the inventory source group. The parent partner will not
         * have access to this group.
         */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** The ID of the advertiser that owns the inventory source group. The parent partner will not have
       access to this group.
         */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /**
         * The ID of the advertiser that owns the inventory source group. The parent partner will not
         * have access to this group.
         */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        /**
         * The ID of the partner that owns the inventory source group. Only this partner will have
         * write access to this group. Only advertisers to which this group is explicitly shared will
         * have read access to this group.
         */
        @com.google.api.client.util.Key
        private java.lang.Long partnerId;
        /** The ID of the partner that owns the inventory source group. Only this partner will have write
       access to this group. Only advertisers to which this group is explicitly shared will have read
       access to this group.
         */
        public java.lang.Long getPartnerId() {
          return partnerId;
        }
        /**
         * The ID of the partner that owns the inventory source group. Only this partner will have
         * write access to this group. Only advertisers to which this group is explicitly shared will
         * have read access to this group.
         */
        public Create setPartnerId(java.lang.Long partnerId) {
          this.partnerId = partnerId;
          return this;
        }
        /** Sets an arbitrary query parameter by name, delegating to the superclass implementation. */
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Deletes an inventory source group.
*
* Create a request for the method "inventorySourceGroups.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param inventorySourceGroupId Required. The ID of the inventory source group to delete.
* @return the request
*/
public Delete delete(java.lang.Long inventorySourceGroupId) throws java.io.IOException {
Delete result = new Delete(inventorySourceGroupId);
initialize(result);
return result;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
        private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}";
        // NOTE(review): this pattern appears unreferenced in the visible portion of the class
        // (the path parameter is a Long, and pattern checks are applied to String params) —
        // confirm against the rest of the class before removing.
        private final java.util.regex.Pattern INVENTORY_SOURCE_GROUP_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Deletes an inventory source group.
         *
         * Create a request for the method "inventorySourceGroups.delete".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
         * <p> {@link
         * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param inventorySourceGroupId Required. The ID of the inventory source group to delete.
         * @since 1.13
         */
        protected Delete(java.lang.Long inventorySourceGroupId) {
          super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
          this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
        }
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the inventory source group to delete. */
@com.google.api.client.util.Key
private java.lang.Long inventorySourceGroupId;
/** Required. The ID of the inventory source group to delete.
*/
public java.lang.Long getInventorySourceGroupId() {
return inventorySourceGroupId;
}
/** Required. The ID of the inventory source group to delete. */
public Delete setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
this.inventorySourceGroupId = inventorySourceGroupId;
return this;
}
/**
* The ID of the advertiser that owns the inventory source group. The parent partner does not
* have access to this group.
*/
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that owns the inventory source group. The parent partner does not have
access to this group.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
* The ID of the advertiser that owns the inventory source group. The parent partner does not
* have access to this group.
*/
public Delete setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* The ID of the partner that owns the inventory source group. Only this partner has write
* access to this group.
*/
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that owns the inventory source group. Only this partner has write access to
this group.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/**
* The ID of the partner that owns the inventory source group. Only this partner has write
* access to this group.
*/
public Delete setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
 * Gets an inventory source group.
 *
 * Create a request for the method "inventorySourceGroups.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Get#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to fetch.
 * @return the request
 */
public Get get(java.lang.Long inventorySourceGroupId) throws java.io.IOException {
Get request = new Get(inventorySourceGroupId);
initialize(request);
return request;
}
/**
 * Request object for the "inventorySourceGroups.get" method.
 * Fetches a single inventory source group identified by {@code inventorySourceGroupId}.
 * All fluent setters below override the base request setters so chaining keeps the
 * {@code Get} type.
 */
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InventorySourceGroup> {
// Relative REST path; {+inventorySourceGroupId} is expanded from the required path parameter.
private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}";
// Validates the path segment (any value not containing '/').
// NOTE(review): emitted by the code generator but not referenced in this visible block.
private final java.util.regex.Pattern INVENTORY_SOURCE_GROUP_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Gets an inventory source group.
 *
 * Create a request for the method "inventorySourceGroups.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
 * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to fetch.
 * @since 1.13
 */
protected Get(java.lang.Long inventorySourceGroupId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.InventorySourceGroup.class);
this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the inventory source group to fetch. */
@com.google.api.client.util.Key
private java.lang.Long inventorySourceGroupId;
/** Required. The ID of the inventory source group to fetch.
*/
public java.lang.Long getInventorySourceGroupId() {
return inventorySourceGroupId;
}
/** Required. The ID of the inventory source group to fetch. */
public Get setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
this.inventorySourceGroupId = inventorySourceGroupId;
return this;
}
/**
 * The ID of the advertiser that has access to the inventory source group. If an inventory
 * source group is partner-owned, only advertisers to which the group is explicitly shared can
 * access the group.
 */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the inventory source group. If an inventory source
group is partner-owned, only advertisers to which the group is explicitly shared can access the
group.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
 * The ID of the advertiser that has access to the inventory source group. If an inventory
 * source group is partner-owned, only advertisers to which the group is explicitly shared can
 * access the group.
 */
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * The ID of the partner that has access to the inventory source group. A partner cannot
 * access an advertiser-owned inventory source group.
 */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the inventory source group. A partner cannot access an
advertiser-owned inventory source group.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/**
 * The ID of the partner that has access to the inventory source group. A partner cannot
 * access an advertiser-owned inventory source group.
 */
public Get setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
 * Lists inventory source groups that are accessible to the current user. The order is defined by
 * the order_by parameter.
 *
 * Create a request for the method "inventorySourceGroups.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link List#execute()} method to invoke the remote operation.
 *
 * @return the request
 */
public List list() throws java.io.IOException {
List request = new List();
initialize(request);
return request;
}
/**
 * Request object for the "inventorySourceGroups.list" method.
 * Lists inventory source groups accessible to the current user, with optional filtering,
 * ordering and pagination. All fluent setters below override the base request setters so
 * chaining keeps the {@code List} type.
 */
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListInventorySourceGroupsResponse> {
// Relative REST path for the collection; all parameters are sent as query parameters.
private static final String REST_PATH = "v1/inventorySourceGroups";
/**
 * Lists inventory source groups that are accessible to the current user. The order is defined by
 * the order_by parameter.
 *
 * Create a request for the method "inventorySourceGroups.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
 * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @since 1.13
 */
protected List() {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListInventorySourceGroupsResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
 * The ID of the advertiser that has access to the inventory source group. If an inventory
 * source group is partner-owned, only advertisers to which the group is explicitly shared can
 * access the group.
 */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that has access to the inventory source group. If an inventory source
group is partner-owned, only advertisers to which the group is explicitly shared can access the
group.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
 * The ID of the advertiser that has access to the inventory source group. If an inventory
 * source group is partner-owned, only advertisers to which the group is explicitly shared can
 * access the group.
 */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * Allows filtering by inventory source group properties. Supported syntax: * Filter
 * expressions are made up of one or more restrictions. * Restrictions can be combined by the
 * logical operator `OR`. * A restriction has the form of `{field} {operator} {value}`. * The
 * operator must be `EQUALS (=)`. * Supported fields: - `inventorySourceGroupId` The length of
 * this field should be no more than 500 characters.
 */
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by inventory source group properties. Supported syntax: * Filter expressions are
made up of one or more restrictions. * Restrictions can be combined by the logical operator `OR`. *
A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. *
Supported fields: - `inventorySourceGroupId` The length of this field should be no more than 500
characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
 * Allows filtering by inventory source group properties. Supported syntax: * Filter
 * expressions are made up of one or more restrictions. * Restrictions can be combined by the
 * logical operator `OR`. * A restriction has the form of `{field} {operator} {value}`. * The
 * operator must be `EQUALS (=)`. * Supported fields: - `inventorySourceGroupId` The length of
 * this field should be no more than 500 characters.
 */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
 * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
 * `inventorySourceGroupId` The default sorting order is ascending. To specify descending
 * order for a field, a suffix "desc" should be added to the field name. For example,
 * `displayName desc`.
 */
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `displayName` (default) *
`inventorySourceGroupId` The default sorting order is ascending. To specify descending order for a
field, a suffix "desc" should be added to the field name. For example, `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
 * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
 * `inventorySourceGroupId` The default sorting order is ascending. To specify descending
 * order for a field, a suffix "desc" should be added to the field name. For example,
 * `displayName desc`.
 */
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
 */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
 * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
 */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListInventorySources` method.
 * If not specified, the first page of results will be returned.
 */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListInventorySources` method. If not specified,
the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
 * A token identifying a page of results the server should return. Typically, this is the
 * value of next_page_token returned from the previous call to `ListInventorySources` method.
 * If not specified, the first page of results will be returned.
 */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
/**
 * The ID of the partner that has access to the inventory source group. A partner cannot
 * access advertiser-owned inventory source groups.
 */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that has access to the inventory source group. A partner cannot access
advertiser-owned inventory source groups.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/**
 * The ID of the partner that has access to the inventory source group. A partner cannot
 * access advertiser-owned inventory source groups.
 */
public List setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
 * Updates an inventory source group. Returns the updated inventory source group if successful.
 *
 * Create a request for the method "inventorySourceGroups.patch".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Patch#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Output only. The unique ID of the inventory source group. Assigned by the system.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.InventorySourceGroup}
 * @return the request
 */
public Patch patch(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.InventorySourceGroup content) throws java.io.IOException {
Patch request = new Patch(inventorySourceGroupId, content);
initialize(request);
return request;
}
/**
 * Request object for the "inventorySourceGroups.patch" method.
 * Updates an existing inventory source group; the fields to modify are selected via
 * {@code updateMask}. All fluent setters below override the base request setters so
 * chaining keeps the {@code Patch} type.
 */
public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InventorySourceGroup> {
// Relative REST path; {inventorySourceGroupId} is expanded from the required path parameter.
private static final String REST_PATH = "v1/inventorySourceGroups/{inventorySourceGroupId}";
/**
 * Updates an inventory source group. Returns the updated inventory source group if successful.
 *
 * Create a request for the method "inventorySourceGroups.patch".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
 * <p> {@link
 * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param inventorySourceGroupId Output only. The unique ID of the inventory source group. Assigned by the system.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.InventorySourceGroup}
 * @since 1.13
 */
protected Patch(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.InventorySourceGroup content) {
super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.InventorySourceGroup.class);
this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/** Output only. The unique ID of the inventory source group. Assigned by the system. */
@com.google.api.client.util.Key
private java.lang.Long inventorySourceGroupId;
/** Output only. The unique ID of the inventory source group. Assigned by the system.
*/
public java.lang.Long getInventorySourceGroupId() {
return inventorySourceGroupId;
}
/** Output only. The unique ID of the inventory source group. Assigned by the system. */
public Patch setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
this.inventorySourceGroupId = inventorySourceGroupId;
return this;
}
/**
 * The ID of the advertiser that owns the inventory source group. The parent partner does not
 * have access to this group.
 */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that owns the inventory source group. The parent partner does not have
access to this group.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/**
 * The ID of the advertiser that owns the inventory source group. The parent partner does not
 * have access to this group.
 */
public Patch setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
 * The ID of the partner that owns the inventory source group. Only this partner has write
 * access to this group.
 */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that owns the inventory source group. Only this partner has write access to
this group.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/**
 * The ID of the partner that owns the inventory source group. Only this partner has write
 * access to this group.
 */
public Patch setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
/** Required. The mask to control which fields to update. */
@com.google.api.client.util.Key
private String updateMask;
/** Required. The mask to control which fields to update.
*/
public String getUpdateMask() {
return updateMask;
}
/** Required. The mask to control which fields to update. */
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
/**
 * An accessor for creating requests from the AssignedInventorySources collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 * {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 * {@code DisplayVideo.AssignedInventorySources.List request = displayvideo.assignedInventorySources().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public AssignedInventorySources assignedInventorySources() {
AssignedInventorySources collection = new AssignedInventorySources();
return collection;
}
/**
* The "assignedInventorySources" collection of methods.
*/
public class AssignedInventorySources {
/**
 * Bulk edits multiple assignments between inventory sources and a single inventory source group.
 * The operation will delete the assigned inventory sources provided in
 * BulkEditAssignedInventorySourcesRequest.deleted_assigned_inventory_sources and then create the
 * assigned inventory sources provided in
 * BulkEditAssignedInventorySourcesRequest.created_assigned_inventory_sources.
 *
 * Create a request for the method "assignedInventorySources.bulkEdit".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to which the assignments are assigned.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesRequest}
 * @return the request
 */
public BulkEdit bulkEdit(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesRequest content) throws java.io.IOException {
BulkEdit request = new BulkEdit(inventorySourceGroupId, content);
initialize(request);
return request;
}
/**
 * Request object for the "assignedInventorySources.bulkEdit" method.
 * Atomically deletes and then creates assignments between inventory sources and a single
 * inventory source group. All fluent setters below override the base request setters so
 * chaining keeps the {@code BulkEdit} type.
 */
public class BulkEdit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesResponse> {
// Relative REST path; {+inventorySourceGroupId} is expanded from the required path parameter.
private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}/assignedInventorySources:bulkEdit";
// Validates the path segment (any value not containing '/').
// NOTE(review): emitted by the code generator but not referenced in this visible block.
private final java.util.regex.Pattern INVENTORY_SOURCE_GROUP_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Bulk edits multiple assignments between inventory sources and a single inventory source group.
 * The operation will delete the assigned inventory sources provided in
 * BulkEditAssignedInventorySourcesRequest.deleted_assigned_inventory_sources and then create the
 * assigned inventory sources provided in
 * BulkEditAssignedInventorySourcesRequest.created_assigned_inventory_sources.
 *
 * Create a request for the method "assignedInventorySources.bulkEdit".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
 * <p> {@link
 * BulkEdit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to which the assignments are assigned.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesRequest}
 * @since 1.13
 */
protected BulkEdit(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesRequest content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditAssignedInventorySourcesResponse.class);
this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
}
@Override
public BulkEdit set$Xgafv(java.lang.String $Xgafv) {
return (BulkEdit) super.set$Xgafv($Xgafv);
}
@Override
public BulkEdit setAccessToken(java.lang.String accessToken) {
return (BulkEdit) super.setAccessToken(accessToken);
}
@Override
public BulkEdit setAlt(java.lang.String alt) {
return (BulkEdit) super.setAlt(alt);
}
@Override
public BulkEdit setCallback(java.lang.String callback) {
return (BulkEdit) super.setCallback(callback);
}
@Override
public BulkEdit setFields(java.lang.String fields) {
return (BulkEdit) super.setFields(fields);
}
@Override
public BulkEdit setKey(java.lang.String key) {
return (BulkEdit) super.setKey(key);
}
@Override
public BulkEdit setOauthToken(java.lang.String oauthToken) {
return (BulkEdit) super.setOauthToken(oauthToken);
}
@Override
public BulkEdit setPrettyPrint(java.lang.Boolean prettyPrint) {
return (BulkEdit) super.setPrettyPrint(prettyPrint);
}
@Override
public BulkEdit setQuotaUser(java.lang.String quotaUser) {
return (BulkEdit) super.setQuotaUser(quotaUser);
}
@Override
public BulkEdit setUploadType(java.lang.String uploadType) {
return (BulkEdit) super.setUploadType(uploadType);
}
@Override
public BulkEdit setUploadProtocol(java.lang.String uploadProtocol) {
return (BulkEdit) super.setUploadProtocol(uploadProtocol);
}
/**
 * Required. The ID of the inventory source group to which the assignments are assigned.
 */
@com.google.api.client.util.Key
private java.lang.Long inventorySourceGroupId;
/** Required. The ID of the inventory source group to which the assignments are assigned.
*/
public java.lang.Long getInventorySourceGroupId() {
return inventorySourceGroupId;
}
/**
 * Required. The ID of the inventory source group to which the assignments are assigned.
 */
public BulkEdit setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
this.inventorySourceGroupId = inventorySourceGroupId;
return this;
}
@Override
public BulkEdit set(String parameterName, Object value) {
return (BulkEdit) super.set(parameterName, value);
}
}
/**
 * Creates an assignment between an inventory source and an inventory source group.
 *
 * Create a request for the method "assignedInventorySources.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Create#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to which the assignment will be assigned.
 * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedInventorySource}
 * @return the request
 */
public Create create(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.AssignedInventorySource content) throws java.io.IOException {
Create request = new Create(inventorySourceGroupId, content);
initialize(request);
return request;
}
/**
 * Request object for the "assignedInventorySources.create" method. Instances are obtained via
 * {@code create(...)} and executed with {@link Create#execute()}.
 */
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedInventorySource> {

  private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}/assignedInventorySources";

  // NOTE(review): the generated INVENTORY_SOURCE_GROUP_ID_PATTERN instance field was removed.
  // It was never referenced (the path parameter is a Long, so no pattern check is emitted),
  // and as a non-static field it recompiled its regex on every request construction.

  /**
   * Creates an assignment between an inventory source and an inventory source group.
   *
   * Create a request for the method "assignedInventorySources.create".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
   * <p> {@link
   * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param inventorySourceGroupId Required. The ID of the inventory source group to which the assignment will be assigned.
   * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedInventorySource}
   * @since 1.13
   */
  protected Create(java.lang.Long inventorySourceGroupId, com.google.api.services.displayvideo.v1.model.AssignedInventorySource content) {
    super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.AssignedInventorySource.class);
    this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
  }

  @Override
  public Create set$Xgafv(java.lang.String $Xgafv) {
    return (Create) super.set$Xgafv($Xgafv);
  }

  @Override
  public Create setAccessToken(java.lang.String accessToken) {
    return (Create) super.setAccessToken(accessToken);
  }

  @Override
  public Create setAlt(java.lang.String alt) {
    return (Create) super.setAlt(alt);
  }

  @Override
  public Create setCallback(java.lang.String callback) {
    return (Create) super.setCallback(callback);
  }

  @Override
  public Create setFields(java.lang.String fields) {
    return (Create) super.setFields(fields);
  }

  @Override
  public Create setKey(java.lang.String key) {
    return (Create) super.setKey(key);
  }

  @Override
  public Create setOauthToken(java.lang.String oauthToken) {
    return (Create) super.setOauthToken(oauthToken);
  }

  @Override
  public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Create) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public Create setQuotaUser(java.lang.String quotaUser) {
    return (Create) super.setQuotaUser(quotaUser);
  }

  @Override
  public Create setUploadType(java.lang.String uploadType) {
    return (Create) super.setUploadType(uploadType);
  }

  @Override
  public Create setUploadProtocol(java.lang.String uploadProtocol) {
    return (Create) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Required. The ID of the inventory source group to which the assignment will be assigned.
   */
  @com.google.api.client.util.Key
  private java.lang.Long inventorySourceGroupId;

  /** Required. The ID of the inventory source group to which the assignment will be assigned.
   */
  public java.lang.Long getInventorySourceGroupId() {
    return inventorySourceGroupId;
  }

  /**
   * Required. The ID of the inventory source group to which the assignment will be assigned.
   */
  public Create setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
    this.inventorySourceGroupId = inventorySourceGroupId;
    return this;
  }

  /**
   * The ID of the advertiser that owns the parent inventory source group. The parent partner
   * will not have access to this assigned inventory source.
   */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;

  /** The ID of the advertiser that owns the parent inventory source group. The parent partner will not
   have access to this assigned inventory source.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }

  /**
   * The ID of the advertiser that owns the parent inventory source group. The parent partner
   * will not have access to this assigned inventory source.
   */
  public Create setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }

  /**
   * The ID of the partner that owns the parent inventory source group. Only this partner will
   * have write access to this assigned inventory source.
   */
  @com.google.api.client.util.Key
  private java.lang.Long partnerId;

  /** The ID of the partner that owns the parent inventory source group. Only this partner will have
   write access to this assigned inventory source.
   */
  public java.lang.Long getPartnerId() {
    return partnerId;
  }

  /**
   * The ID of the partner that owns the parent inventory source group. Only this partner will
   * have write access to this assigned inventory source.
   */
  public Create setPartnerId(java.lang.Long partnerId) {
    this.partnerId = partnerId;
    return this;
  }

  @Override
  public Create set(String parameterName, Object value) {
    return (Create) super.set(parameterName, value);
  }
}
/**
 * Deletes the assignment between an inventory source and an inventory source group.
 *
 * Create a request for the method "assignedInventorySources.delete".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Delete#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to which this assignment is assigned.
 * @param assignedInventorySourceId Required. The ID of the assigned inventory source to delete.
 * @return the request
 */
public Delete delete(java.lang.Long inventorySourceGroupId, java.lang.Long assignedInventorySourceId) throws java.io.IOException {
  // Build the request and let the client apply its registered initializer
  // before handing the request back to the caller for execution.
  Delete request = new Delete(inventorySourceGroupId, assignedInventorySourceId);
  initialize(request);
  return request;
}
/**
 * Request object for the "assignedInventorySources.delete" method. Instances are obtained via
 * {@code delete(...)} and executed with {@link Delete#execute()}.
 */
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {

  private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}/assignedInventorySources/{+assignedInventorySourceId}";

  // NOTE(review): the generated INVENTORY_SOURCE_GROUP_ID_PATTERN and
  // ASSIGNED_INVENTORY_SOURCE_ID_PATTERN instance fields were removed. They were never
  // referenced (both path parameters are Longs, so no pattern check is emitted), and as
  // non-static fields they recompiled their regexes on every request construction.

  /**
   * Deletes the assignment between an inventory source and an inventory source group.
   *
   * Create a request for the method "assignedInventorySources.delete".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
   * <p> {@link
   * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
   * be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param inventorySourceGroupId Required. The ID of the inventory source group to which this assignment is assigned.
   * @param assignedInventorySourceId Required. The ID of the assigned inventory source to delete.
   * @since 1.13
   */
  protected Delete(java.lang.Long inventorySourceGroupId, java.lang.Long assignedInventorySourceId) {
    super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
    this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
    this.assignedInventorySourceId = com.google.api.client.util.Preconditions.checkNotNull(assignedInventorySourceId, "Required parameter assignedInventorySourceId must be specified.");
  }

  @Override
  public Delete set$Xgafv(java.lang.String $Xgafv) {
    return (Delete) super.set$Xgafv($Xgafv);
  }

  @Override
  public Delete setAccessToken(java.lang.String accessToken) {
    return (Delete) super.setAccessToken(accessToken);
  }

  @Override
  public Delete setAlt(java.lang.String alt) {
    return (Delete) super.setAlt(alt);
  }

  @Override
  public Delete setCallback(java.lang.String callback) {
    return (Delete) super.setCallback(callback);
  }

  @Override
  public Delete setFields(java.lang.String fields) {
    return (Delete) super.setFields(fields);
  }

  @Override
  public Delete setKey(java.lang.String key) {
    return (Delete) super.setKey(key);
  }

  @Override
  public Delete setOauthToken(java.lang.String oauthToken) {
    return (Delete) super.setOauthToken(oauthToken);
  }

  @Override
  public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (Delete) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public Delete setQuotaUser(java.lang.String quotaUser) {
    return (Delete) super.setQuotaUser(quotaUser);
  }

  @Override
  public Delete setUploadType(java.lang.String uploadType) {
    return (Delete) super.setUploadType(uploadType);
  }

  @Override
  public Delete setUploadProtocol(java.lang.String uploadProtocol) {
    return (Delete) super.setUploadProtocol(uploadProtocol);
  }

  /** Required. The ID of the inventory source group to which this assignment is assigned. */
  @com.google.api.client.util.Key
  private java.lang.Long inventorySourceGroupId;

  /** Required. The ID of the inventory source group to which this assignment is assigned.
   */
  public java.lang.Long getInventorySourceGroupId() {
    return inventorySourceGroupId;
  }

  /** Required. The ID of the inventory source group to which this assignment is assigned. */
  public Delete setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
    this.inventorySourceGroupId = inventorySourceGroupId;
    return this;
  }

  /** Required. The ID of the assigned inventory source to delete. */
  @com.google.api.client.util.Key
  private java.lang.Long assignedInventorySourceId;

  /** Required. The ID of the assigned inventory source to delete.
   */
  public java.lang.Long getAssignedInventorySourceId() {
    return assignedInventorySourceId;
  }

  /** Required. The ID of the assigned inventory source to delete. */
  public Delete setAssignedInventorySourceId(java.lang.Long assignedInventorySourceId) {
    this.assignedInventorySourceId = assignedInventorySourceId;
    return this;
  }

  /**
   * The ID of the advertiser that owns the parent inventory source group. The parent partner
   * does not have access to this assigned inventory source.
   */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;

  /** The ID of the advertiser that owns the parent inventory source group. The parent partner does not
   have access to this assigned inventory source.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }

  /**
   * The ID of the advertiser that owns the parent inventory source group. The parent partner
   * does not have access to this assigned inventory source.
   */
  public Delete setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }

  /**
   * The ID of the partner that owns the parent inventory source group. Only this partner has
   * write access to this assigned inventory source.
   */
  @com.google.api.client.util.Key
  private java.lang.Long partnerId;

  /** The ID of the partner that owns the parent inventory source group. Only this partner has write
   access to this assigned inventory source.
   */
  public java.lang.Long getPartnerId() {
    return partnerId;
  }

  /**
   * The ID of the partner that owns the parent inventory source group. Only this partner has
   * write access to this assigned inventory source.
   */
  public Delete setPartnerId(java.lang.Long partnerId) {
    this.partnerId = partnerId;
    return this;
  }

  @Override
  public Delete set(String parameterName, Object value) {
    return (Delete) super.set(parameterName, value);
  }
}
/**
 * Lists inventory sources assigned to an inventory source group.
 *
 * Create a request for the method "assignedInventorySources.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link List#execute()} method to invoke the remote operation.
 *
 * @param inventorySourceGroupId Required. The ID of the inventory source group to which these assignments are assigned.
 * @return the request
 */
public List list(java.lang.Long inventorySourceGroupId) throws java.io.IOException {
  // Build the request and let the client apply its registered initializer
  // before handing the request back to the caller for execution.
  List request = new List(inventorySourceGroupId);
  initialize(request);
  return request;
}
/**
 * Request object for the "assignedInventorySources.list" method. Instances are obtained via
 * {@code list(...)} and executed with {@link List#execute()}.
 */
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListAssignedInventorySourcesResponse> {

  private static final String REST_PATH = "v1/inventorySourceGroups/{+inventorySourceGroupId}/assignedInventorySources";

  // NOTE(review): the generated INVENTORY_SOURCE_GROUP_ID_PATTERN instance field was removed.
  // It was never referenced (the path parameter is a Long, so no pattern check is emitted),
  // and as a non-static field it recompiled its regex on every request construction.

  /**
   * Lists inventory sources assigned to an inventory source group.
   *
   * Create a request for the method "assignedInventorySources.list".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any
   * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
   * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
   * must be called to initialize this instance immediately after invoking the constructor. </p>
   *
   * @param inventorySourceGroupId Required. The ID of the inventory source group to which these assignments are assigned.
   * @since 1.13
   */
  protected List(java.lang.Long inventorySourceGroupId) {
    super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListAssignedInventorySourcesResponse.class);
    this.inventorySourceGroupId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceGroupId, "Required parameter inventorySourceGroupId must be specified.");
  }

  @Override
  public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
    return super.executeUsingHead();
  }

  @Override
  public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
    return super.buildHttpRequestUsingHead();
  }

  @Override
  public List set$Xgafv(java.lang.String $Xgafv) {
    return (List) super.set$Xgafv($Xgafv);
  }

  @Override
  public List setAccessToken(java.lang.String accessToken) {
    return (List) super.setAccessToken(accessToken);
  }

  @Override
  public List setAlt(java.lang.String alt) {
    return (List) super.setAlt(alt);
  }

  @Override
  public List setCallback(java.lang.String callback) {
    return (List) super.setCallback(callback);
  }

  @Override
  public List setFields(java.lang.String fields) {
    return (List) super.setFields(fields);
  }

  @Override
  public List setKey(java.lang.String key) {
    return (List) super.setKey(key);
  }

  @Override
  public List setOauthToken(java.lang.String oauthToken) {
    return (List) super.setOauthToken(oauthToken);
  }

  @Override
  public List setPrettyPrint(java.lang.Boolean prettyPrint) {
    return (List) super.setPrettyPrint(prettyPrint);
  }

  @Override
  public List setQuotaUser(java.lang.String quotaUser) {
    return (List) super.setQuotaUser(quotaUser);
  }

  @Override
  public List setUploadType(java.lang.String uploadType) {
    return (List) super.setUploadType(uploadType);
  }

  @Override
  public List setUploadProtocol(java.lang.String uploadProtocol) {
    return (List) super.setUploadProtocol(uploadProtocol);
  }

  /**
   * Required. The ID of the inventory source group to which these assignments are assigned.
   */
  @com.google.api.client.util.Key
  private java.lang.Long inventorySourceGroupId;

  /** Required. The ID of the inventory source group to which these assignments are assigned.
   */
  public java.lang.Long getInventorySourceGroupId() {
    return inventorySourceGroupId;
  }

  /**
   * Required. The ID of the inventory source group to which these assignments are assigned.
   */
  public List setInventorySourceGroupId(java.lang.Long inventorySourceGroupId) {
    this.inventorySourceGroupId = inventorySourceGroupId;
    return this;
  }

  /**
   * The ID of the advertiser that has access to the assignment. If the parent inventory
   * source group is partner-owned, only advertisers to which the parent group is explicitly
   * shared can access the assigned inventory source.
   */
  @com.google.api.client.util.Key
  private java.lang.Long advertiserId;

  /** The ID of the advertiser that has access to the assignment. If the parent inventory source group is
   partner-owned, only advertisers to which the parent group is explicitly shared can access the
   assigned inventory source.
   */
  public java.lang.Long getAdvertiserId() {
    return advertiserId;
  }

  /**
   * The ID of the advertiser that has access to the assignment. If the parent inventory
   * source group is partner-owned, only advertisers to which the parent group is explicitly
   * shared can access the assigned inventory source.
   */
  public List setAdvertiserId(java.lang.Long advertiserId) {
    this.advertiserId = advertiserId;
    return this;
  }

  /**
   * Allows filtering by assigned inventory source fields. Supported syntax: * Filter
   * expressions are made up of one or more restrictions. * Restrictions can be combined by
   * the logical operator `OR`. * A restriction has the form of `{field} {operator} {value}`.
   * * The operator must be `EQUALS (=)`. * Supported fields: - `assignedInventorySourceId`
   * The length of this field should be no more than 500 characters.
   */
  @com.google.api.client.util.Key
  private java.lang.String filter;

  /** Allows filtering by assigned inventory source fields. Supported syntax: * Filter expressions are
   made up of one or more restrictions. * Restrictions can be combined by the logical operator `OR`. *
   A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. *
   Supported fields: - `assignedInventorySourceId` The length of this field should be no more than 500
   characters.
   */
  public java.lang.String getFilter() {
    return filter;
  }

  /**
   * Allows filtering by assigned inventory source fields. Supported syntax: * Filter
   * expressions are made up of one or more restrictions. * Restrictions can be combined by
   * the logical operator `OR`. * A restriction has the form of `{field} {operator} {value}`.
   * * The operator must be `EQUALS (=)`. * Supported fields: - `assignedInventorySourceId`
   * The length of this field should be no more than 500 characters.
   */
  public List setFilter(java.lang.String filter) {
    this.filter = filter;
    return this;
  }

  /**
   * Field by which to sort the list. Acceptable values are: * `assignedInventorySourceId`
   * (default) The default sorting order is ascending. To specify descending order for a
   * field, a suffix " desc" should be added to the field name. Example:
   * `assignedInventorySourceId desc`.
   */
  @com.google.api.client.util.Key
  private java.lang.String orderBy;

  /** Field by which to sort the list. Acceptable values are: * `assignedInventorySourceId` (default) The
   default sorting order is ascending. To specify descending order for a field, a suffix " desc"
   should be added to the field name. Example: `assignedInventorySourceId desc`.
   */
  public java.lang.String getOrderBy() {
    return orderBy;
  }

  /**
   * Field by which to sort the list. Acceptable values are: * `assignedInventorySourceId`
   * (default) The default sorting order is ascending. To specify descending order for a
   * field, a suffix " desc" should be added to the field name. Example:
   * `assignedInventorySourceId desc`.
   */
  public List setOrderBy(java.lang.String orderBy) {
    this.orderBy = orderBy;
    return this;
  }

  /**
   * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
   * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
   */
  @com.google.api.client.util.Key
  private java.lang.Integer pageSize;

  /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
   error code `INVALID_ARGUMENT` if an invalid value is specified.
   */
  public java.lang.Integer getPageSize() {
    return pageSize;
  }

  /**
   * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
   * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
   */
  public List setPageSize(java.lang.Integer pageSize) {
    this.pageSize = pageSize;
    return this;
  }

  /**
   * A token identifying a page of results the server should return. Typically, this is the
   * value of next_page_token returned from the previous call to
   * `ListAssignedInventorySources` method. If not specified, the first page of results will
   * be returned.
   */
  @com.google.api.client.util.Key
  private java.lang.String pageToken;

  /** A token identifying a page of results the server should return. Typically, this is the value of
   next_page_token returned from the previous call to `ListAssignedInventorySources` method. If not
   specified, the first page of results will be returned.
   */
  public java.lang.String getPageToken() {
    return pageToken;
  }

  /**
   * A token identifying a page of results the server should return. Typically, this is the
   * value of next_page_token returned from the previous call to
   * `ListAssignedInventorySources` method. If not specified, the first page of results will
   * be returned.
   */
  public List setPageToken(java.lang.String pageToken) {
    this.pageToken = pageToken;
    return this;
  }

  /**
   * The ID of the partner that has access to the assignment. If the parent inventory source
   * group is advertiser-owned, the assignment cannot be accessed via a partner.
   */
  @com.google.api.client.util.Key
  private java.lang.Long partnerId;

  /** The ID of the partner that has access to the assignment. If the parent inventory source group is
   advertiser-owned, the assignment cannot be accessed via a partner.
   */
  public java.lang.Long getPartnerId() {
    return partnerId;
  }

  /**
   * The ID of the partner that has access to the assignment. If the parent inventory source
   * group is advertiser-owned, the assignment cannot be accessed via a partner.
   */
  public List setPartnerId(java.lang.Long partnerId) {
    this.partnerId = partnerId;
    return this;
  }

  @Override
  public List set(String parameterName, Object value) {
    return (List) super.set(parameterName, value);
  }
}
}
}
/**
 * An accessor for creating requests from the InventorySources collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.InventorySources.List request = displayvideo.inventorySources().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public InventorySources inventorySources() {
  // A fresh accessor is returned on each call; the accessor itself holds no state.
  InventorySources collection = new InventorySources();
  return collection;
}
/**
 * The "inventorySources" collection of methods.
 */
public class InventorySources {

  /**
   * Gets an inventory source.
   *
   * Create a request for the method "inventorySources.get".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any optional
   * parameters, call the {@link Get#execute()} method to invoke the remote operation.
   *
   * @param inventorySourceId Required. The ID of the inventory source to fetch.
   * @return the request
   */
  public Get get(java.lang.Long inventorySourceId) throws java.io.IOException {
    Get result = new Get(inventorySourceId);
    initialize(result);
    return result;
  }

  /**
   * Request object for the "inventorySources.get" method. Instances are obtained via
   * {@code get(...)} and executed with {@link Get#execute()}.
   */
  public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.InventorySource> {

    private static final String REST_PATH = "v1/inventorySources/{+inventorySourceId}";

    // NOTE(review): the generated INVENTORY_SOURCE_ID_PATTERN instance field was removed.
    // It was never referenced (the path parameter is a Long, so no pattern check is emitted),
    // and as a non-static field it recompiled its regex on every request construction.

    /**
     * Gets an inventory source.
     *
     * Create a request for the method "inventorySources.get".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any
     * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
     * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
     * must be called to initialize this instance immediately after invoking the constructor. </p>
     *
     * @param inventorySourceId Required. The ID of the inventory source to fetch.
     * @since 1.13
     */
    protected Get(java.lang.Long inventorySourceId) {
      super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.InventorySource.class);
      this.inventorySourceId = com.google.api.client.util.Preconditions.checkNotNull(inventorySourceId, "Required parameter inventorySourceId must be specified.");
    }

    @Override
    public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
      return super.executeUsingHead();
    }

    @Override
    public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
      return super.buildHttpRequestUsingHead();
    }

    @Override
    public Get set$Xgafv(java.lang.String $Xgafv) {
      return (Get) super.set$Xgafv($Xgafv);
    }

    @Override
    public Get setAccessToken(java.lang.String accessToken) {
      return (Get) super.setAccessToken(accessToken);
    }

    @Override
    public Get setAlt(java.lang.String alt) {
      return (Get) super.setAlt(alt);
    }

    @Override
    public Get setCallback(java.lang.String callback) {
      return (Get) super.setCallback(callback);
    }

    @Override
    public Get setFields(java.lang.String fields) {
      return (Get) super.setFields(fields);
    }

    @Override
    public Get setKey(java.lang.String key) {
      return (Get) super.setKey(key);
    }

    @Override
    public Get setOauthToken(java.lang.String oauthToken) {
      return (Get) super.setOauthToken(oauthToken);
    }

    @Override
    public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
      return (Get) super.setPrettyPrint(prettyPrint);
    }

    @Override
    public Get setQuotaUser(java.lang.String quotaUser) {
      return (Get) super.setQuotaUser(quotaUser);
    }

    @Override
    public Get setUploadType(java.lang.String uploadType) {
      return (Get) super.setUploadType(uploadType);
    }

    @Override
    public Get setUploadProtocol(java.lang.String uploadProtocol) {
      return (Get) super.setUploadProtocol(uploadProtocol);
    }

    /** Required. The ID of the inventory source to fetch. */
    @com.google.api.client.util.Key
    private java.lang.Long inventorySourceId;

    /** Required. The ID of the inventory source to fetch.
     */
    public java.lang.Long getInventorySourceId() {
      return inventorySourceId;
    }

    /** Required. The ID of the inventory source to fetch. */
    public Get setInventorySourceId(java.lang.Long inventorySourceId) {
      this.inventorySourceId = inventorySourceId;
      return this;
    }

    /**
     * Required. The ID of the DV360 partner to which the fetched inventory source is
     * permissioned.
     */
    @com.google.api.client.util.Key
    private java.lang.Long partnerId;

    /** Required. The ID of the DV360 partner to which the fetched inventory source is permissioned.
     */
    public java.lang.Long getPartnerId() {
      return partnerId;
    }

    /**
     * Required. The ID of the DV360 partner to which the fetched inventory source is
     * permissioned.
     */
    public Get setPartnerId(java.lang.Long partnerId) {
      this.partnerId = partnerId;
      return this;
    }

    @Override
    public Get set(String parameterName, Object value) {
      return (Get) super.set(parameterName, value);
    }
  }

  /**
   * Lists inventory sources that are accessible to the current user. The order is defined by the
   * order_by parameter. If a filter by entity_status is not specified, inventory sources with entity
   * status `ENTITY_STATUS_ARCHIVED` will not be included in the results.
   *
   * Create a request for the method "inventorySources.list".
   *
   * This request holds the parameters needed by the displayvideo server. After setting any optional
   * parameters, call the {@link List#execute()} method to invoke the remote operation.
   *
   * @return the request
   */
  public List list() throws java.io.IOException {
    List result = new List();
    initialize(result);
    return result;
  }

  /**
   * Request object for the "inventorySources.list" method. Instances are obtained via
   * {@code list()} and executed with {@link List#execute()}.
   */
  public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListInventorySourcesResponse> {

    private static final String REST_PATH = "v1/inventorySources";

    /**
     * Lists inventory sources that are accessible to the current user. The order is defined by the
     * order_by parameter. If a filter by entity_status is not specified, inventory sources with
     * entity status `ENTITY_STATUS_ARCHIVED` will not be included in the results.
     *
     * Create a request for the method "inventorySources.list".
     *
     * This request holds the parameters needed by the displayvideo server. After setting any
     * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
     * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
     * must be called to initialize this instance immediately after invoking the constructor. </p>
     *
     * @since 1.13
     */
    protected List() {
      super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListInventorySourcesResponse.class);
    }

    @Override
    public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
      return super.executeUsingHead();
    }

    @Override
    public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
      return super.buildHttpRequestUsingHead();
    }

    @Override
    public List set$Xgafv(java.lang.String $Xgafv) {
      return (List) super.set$Xgafv($Xgafv);
    }

    @Override
    public List setAccessToken(java.lang.String accessToken) {
      return (List) super.setAccessToken(accessToken);
    }

    @Override
    public List setAlt(java.lang.String alt) {
      return (List) super.setAlt(alt);
    }

    @Override
    public List setCallback(java.lang.String callback) {
      return (List) super.setCallback(callback);
    }

    @Override
    public List setFields(java.lang.String fields) {
      return (List) super.setFields(fields);
    }

    @Override
    public List setKey(java.lang.String key) {
      return (List) super.setKey(key);
    }

    @Override
    public List setOauthToken(java.lang.String oauthToken) {
      return (List) super.setOauthToken(oauthToken);
    }

    @Override
    public List setPrettyPrint(java.lang.Boolean prettyPrint) {
      return (List) super.setPrettyPrint(prettyPrint);
    }

    @Override
    public List setQuotaUser(java.lang.String quotaUser) {
      return (List) super.setQuotaUser(quotaUser);
    }

    @Override
    public List setUploadType(java.lang.String uploadType) {
      return (List) super.setUploadType(uploadType);
    }

    @Override
    public List setUploadProtocol(java.lang.String uploadProtocol) {
      return (List) super.setUploadProtocol(uploadProtocol);
    }

    /** The ID of the advertiser that has access to the inventory source. */
    @com.google.api.client.util.Key
    private java.lang.Long advertiserId;

    /** The ID of the advertiser that has access to the inventory source.
     */
    public java.lang.Long getAdvertiserId() {
      return advertiserId;
    }

    /** The ID of the advertiser that has access to the inventory source. */
    public List setAdvertiserId(java.lang.Long advertiserId) {
      this.advertiserId = advertiserId;
      return this;
    }

    /**
     * Allows filtering by inventory source properties. Supported syntax: * Filter expressions are
     * made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
     * logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
     * the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported
     * fields: - `status.entityStatus` - `commitment` - `deliveryMethod` - `rateDetails.rateType`
     * - `exchange` Examples: * All active inventory sources:
     * `status.entityStatus="ENTITY_STATUS_ACTIVE"` * Inventory sources belonging to Google Ad
     * Manager or Rubicon exchanges: `exchange="EXCHANGE_GOOGLE_AD_MANAGER" OR
     * exchange="EXCHANGE_RUBICON"` The length of this field should be no more than 500
     * characters.
     */
    @com.google.api.client.util.Key
    private java.lang.String filter;

    /** Allows filtering by inventory source properties. Supported syntax: * Filter expressions are made up
     of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A
     sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
     {value}`. * The operator must be `EQUALS (=)`. * Supported fields: - `status.entityStatus` -
     `commitment` - `deliveryMethod` - `rateDetails.rateType` - `exchange` Examples: * All active
     inventory sources: `status.entityStatus="ENTITY_STATUS_ACTIVE"` * Inventory sources belonging to
     Google Ad Manager or Rubicon exchanges: `exchange="EXCHANGE_GOOGLE_AD_MANAGER" OR
     exchange="EXCHANGE_RUBICON"` The length of this field should be no more than 500 characters.
     */
    public java.lang.String getFilter() {
      return filter;
    }

    /**
     * Allows filtering by inventory source properties. Supported syntax: * Filter expressions are
     * made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR`
     * logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has
     * the form of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported
     * fields: - `status.entityStatus` - `commitment` - `deliveryMethod` - `rateDetails.rateType`
     * - `exchange` Examples: * All active inventory sources:
     * `status.entityStatus="ENTITY_STATUS_ACTIVE"` * Inventory sources belonging to Google Ad
     * Manager or Rubicon exchanges: `exchange="EXCHANGE_GOOGLE_AD_MANAGER" OR
     * exchange="EXCHANGE_RUBICON"` The length of this field should be no more than 500
     * characters.
     */
    public List setFilter(java.lang.String filter) {
      this.filter = filter;
      return this;
    }

    /**
     * Field by which to sort the list. Acceptable values are: * `displayName` (default) The
     * default sorting order is ascending. To specify descending order for a field, a suffix
     * "desc" should be added to the field name. For example, `displayName desc`.
     */
    @com.google.api.client.util.Key
    private java.lang.String orderBy;

    /** Field by which to sort the list. Acceptable values are: * `displayName` (default) The default
     sorting order is ascending. To specify descending order for a field, a suffix "desc" should be
     added to the field name. For example, `displayName desc`.
     */
    public java.lang.String getOrderBy() {
      return orderBy;
    }

    /**
     * Field by which to sort the list. Acceptable values are: * `displayName` (default) The
     * default sorting order is ascending. To specify descending order for a field, a suffix
     * "desc" should be added to the field name. For example, `displayName desc`.
     */
    public List setOrderBy(java.lang.String orderBy) {
      this.orderBy = orderBy;
      return this;
    }

    /**
     * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
     */
    @com.google.api.client.util.Key
    private java.lang.Integer pageSize;

    /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
     */
    public java.lang.Integer getPageSize() {
      return pageSize;
    }

    /**
     * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
     */
    public List setPageSize(java.lang.Integer pageSize) {
      this.pageSize = pageSize;
      return this;
    }

    /**
     * A token identifying a page of results the server should return. Typically, this is the
     * value of next_page_token returned from the previous call to `ListInventorySources` method.
     * If not specified, the first page of results will be returned.
     */
    @com.google.api.client.util.Key
    private java.lang.String pageToken;

    /** A token identifying a page of results the server should return. Typically, this is the value of
     next_page_token returned from the previous call to `ListInventorySources` method. If not specified,
     the first page of results will be returned.
     */
    public java.lang.String getPageToken() {
      return pageToken;
    }

    /**
     * A token identifying a page of results the server should return. Typically, this is the
     * value of next_page_token returned from the previous call to `ListInventorySources` method.
     * If not specified, the first page of results will be returned.
     */
    public List setPageToken(java.lang.String pageToken) {
      this.pageToken = pageToken;
      return this;
    }

    /** The ID of the partner that has access to the inventory source. */
    @com.google.api.client.util.Key
    private java.lang.Long partnerId;

    /** The ID of the partner that has access to the inventory source.
     */
    public java.lang.Long getPartnerId() {
      return partnerId;
    }

    /** The ID of the partner that has access to the inventory source. */
    public List setPartnerId(java.lang.Long partnerId) {
      this.partnerId = partnerId;
      return this;
    }

    @Override
    public List set(String parameterName, Object value) {
      return (List) super.set(parameterName, value);
    }
  }
}
/**
* An accessor for creating requests from the Media collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Media.List request = displayvideo.media().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Media media() {
return new Media();
}
/**
* The "media" collection of methods.
*/
public class Media {
/**
* Downloads media. Download is supported on the URI `/download/{resource_name=**}?alt=media.`
* **Note**: Download requests will not be successful without including `alt=media` query string.
*
* Create a request for the method "media.download".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Download#execute()} method to invoke the remote operation.
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @return the request
*/
public Download download(java.lang.String resourceName) throws java.io.IOException {
Download result = new Download(resourceName);
initialize(result);
return result;
}
public class Download extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.GoogleBytestreamMedia> {
private static final String REST_PATH = "download/{+resourceName}";
private final java.util.regex.Pattern RESOURCE_NAME_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Downloads media. Download is supported on the URI `/download/{resource_name=**}?alt=media.`
* **Note**: Download requests will not be successful without including `alt=media` query string.
*
* Create a request for the method "media.download".
*
       * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Download#execute()} method to invoke the remote operation.
* <p> {@link
* Download#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resourceName Name of the media that is being downloaded. See ReadRequest.resource_name.
* @since 1.13
*/
protected Download(java.lang.String resourceName) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.GoogleBytestreamMedia.class);
this.resourceName = com.google.api.client.util.Preconditions.checkNotNull(resourceName, "Required parameter resourceName must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_NAME_PATTERN.matcher(resourceName).matches(),
"Parameter resourceName must conform to the pattern " +
"^.*$");
}
initializeMediaDownload();
}
@Override
public void executeMediaAndDownloadTo(java.io.OutputStream outputStream) throws java.io.IOException {
super.executeMediaAndDownloadTo(outputStream);
}
@Override
public java.io.InputStream executeMediaAsInputStream() throws java.io.IOException {
return super.executeMediaAsInputStream();
}
@Override
public com.google.api.client.http.HttpResponse executeMedia() throws java.io.IOException {
return super.executeMedia();
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Download set$Xgafv(java.lang.String $Xgafv) {
return (Download) super.set$Xgafv($Xgafv);
}
@Override
public Download setAccessToken(java.lang.String accessToken) {
return (Download) super.setAccessToken(accessToken);
}
@Override
public Download setAlt(java.lang.String alt) {
return (Download) super.setAlt(alt);
}
@Override
public Download setCallback(java.lang.String callback) {
return (Download) super.setCallback(callback);
}
@Override
public Download setFields(java.lang.String fields) {
return (Download) super.setFields(fields);
}
@Override
public Download setKey(java.lang.String key) {
return (Download) super.setKey(key);
}
@Override
public Download setOauthToken(java.lang.String oauthToken) {
return (Download) super.setOauthToken(oauthToken);
}
@Override
public Download setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Download) super.setPrettyPrint(prettyPrint);
}
@Override
public Download setQuotaUser(java.lang.String quotaUser) {
return (Download) super.setQuotaUser(quotaUser);
}
@Override
public Download setUploadType(java.lang.String uploadType) {
return (Download) super.setUploadType(uploadType);
}
@Override
public Download setUploadProtocol(java.lang.String uploadProtocol) {
return (Download) super.setUploadProtocol(uploadProtocol);
}
/** Name of the media that is being downloaded. See ReadRequest.resource_name. */
@com.google.api.client.util.Key
private java.lang.String resourceName;
/** Name of the media that is being downloaded. See ReadRequest.resource_name.
*/
public java.lang.String getResourceName() {
return resourceName;
}
/** Name of the media that is being downloaded. See ReadRequest.resource_name. */
public Download setResourceName(java.lang.String resourceName) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_NAME_PATTERN.matcher(resourceName).matches(),
"Parameter resourceName must conform to the pattern " +
"^.*$");
}
this.resourceName = resourceName;
return this;
}
@Override
public Download set(String parameterName, Object value) {
return (Download) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Partners collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Partners.List request = displayvideo.partners().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Partners partners() {
return new Partners();
}
/**
* The "partners" collection of methods.
*/
public class Partners {
/**
* Bulk edits targeting options under a single partner. The operation will delete the assigned
* targeting options provided in BulkEditPartnerAssignedTargetingOptionsRequest.deleteRequests and
* then create the assigned targeting options provided in
* BulkEditPartnerAssignedTargetingOptionsRequest.createRequests .
*
* Create a request for the method "partners.bulkEditPartnerAssignedTargetingOptions".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEditPartnerAssignedTargetingOptions#execute()} method to invoke
* the remote operation.
*
* @param partnerId Required. The ID of the partner.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsRequest}
* @return the request
*/
public BulkEditPartnerAssignedTargetingOptions bulkEditPartnerAssignedTargetingOptions(java.lang.Long partnerId, com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsRequest content) throws java.io.IOException {
BulkEditPartnerAssignedTargetingOptions result = new BulkEditPartnerAssignedTargetingOptions(partnerId, content);
initialize(result);
return result;
}
    public class BulkEditPartnerAssignedTargetingOptions extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsResponse> {
      private static final String REST_PATH = "v1/partners/{+partnerId}:bulkEditPartnerAssignedTargetingOptions";
      // NOTE(review): declared by the generator but never applied in this class (partnerId is a
      // Long, so no pattern check is emitted); kept as generated.
      private final java.util.regex.Pattern PARTNER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Bulk edits targeting options under a single partner. The operation will delete the assigned
       * targeting options provided in BulkEditPartnerAssignedTargetingOptionsRequest.deleteRequests and
       * then create the assigned targeting options provided in
       * BulkEditPartnerAssignedTargetingOptionsRequest.createRequests.
       *
       * Create a request for the method "partners.bulkEditPartnerAssignedTargetingOptions".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link BulkEditPartnerAssignedTargetingOptions#execute()} method
       * to invoke the remote operation. <p> {@link BulkEditPartnerAssignedTargetingOptions#initialize(c
       * om.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be called to
       * initialize this instance immediately after invoking the constructor. </p>
       *
       * @param partnerId Required. The ID of the partner.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsRequest}
       * @since 1.13
       */
      protected BulkEditPartnerAssignedTargetingOptions(java.lang.Long partnerId, com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsRequest content) {
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditPartnerAssignedTargetingOptionsResponse.class);
        this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions set$Xgafv(java.lang.String $Xgafv) {
        return (BulkEditPartnerAssignedTargetingOptions) super.set$Xgafv($Xgafv);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setAccessToken(java.lang.String accessToken) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setAccessToken(accessToken);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setAlt(java.lang.String alt) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setAlt(alt);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setCallback(java.lang.String callback) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setCallback(callback);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setFields(java.lang.String fields) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setFields(fields);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setKey(java.lang.String key) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setKey(key);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setOauthToken(java.lang.String oauthToken) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setOauthToken(oauthToken);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setQuotaUser(java.lang.String quotaUser) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setQuotaUser(quotaUser);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setUploadType(java.lang.String uploadType) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setUploadType(uploadType);
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions setUploadProtocol(java.lang.String uploadProtocol) {
        return (BulkEditPartnerAssignedTargetingOptions) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the partner. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** Required. The ID of the partner.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** Required. The ID of the partner. */
      public BulkEditPartnerAssignedTargetingOptions setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public BulkEditPartnerAssignedTargetingOptions set(String parameterName, Object value) {
        return (BulkEditPartnerAssignedTargetingOptions) super.set(parameterName, value);
      }
    }
/**
* Gets a partner.
*
* Create a request for the method "partners.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param partnerId Required. The ID of the partner to fetch.
* @return the request
*/
public Get get(java.lang.Long partnerId) throws java.io.IOException {
Get result = new Get(partnerId);
initialize(result);
return result;
}
    public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Partner> {
      private static final String REST_PATH = "v1/partners/{+partnerId}";
      // NOTE(review): declared by the generator but never applied in this class (partnerId is a
      // Long, so no pattern check is emitted); kept as generated.
      private final java.util.regex.Pattern PARTNER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Gets a partner.
       *
       * Create a request for the method "partners.get".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
       * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param partnerId Required. The ID of the partner to fetch.
       * @since 1.13
       */
      protected Get(java.lang.Long partnerId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Partner.class);
        this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public Get set$Xgafv(java.lang.String $Xgafv) {
        return (Get) super.set$Xgafv($Xgafv);
      }
      @Override
      public Get setAccessToken(java.lang.String accessToken) {
        return (Get) super.setAccessToken(accessToken);
      }
      @Override
      public Get setAlt(java.lang.String alt) {
        return (Get) super.setAlt(alt);
      }
      @Override
      public Get setCallback(java.lang.String callback) {
        return (Get) super.setCallback(callback);
      }
      @Override
      public Get setFields(java.lang.String fields) {
        return (Get) super.setFields(fields);
      }
      @Override
      public Get setKey(java.lang.String key) {
        return (Get) super.setKey(key);
      }
      @Override
      public Get setOauthToken(java.lang.String oauthToken) {
        return (Get) super.setOauthToken(oauthToken);
      }
      @Override
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** Required. The ID of the partner to fetch. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** Required. The ID of the partner to fetch.
       */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** Required. The ID of the partner to fetch. */
      public Get setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      @Override
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
    }
/**
* Lists partners that are accessible to the current user. The order is defined by the order_by
* parameter.
*
* Create a request for the method "partners.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListPartnersResponse> {
private static final String REST_PATH = "v1/partners";
/**
* Lists partners that are accessible to the current user. The order is defined by the order_by
* parameter.
*
* Create a request for the method "partners.list".
*
       * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListPartnersResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Allows filtering by partner properties. Supported syntax: * Filter expressions are made up
* of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
* operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
* of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `entityStatus` Examples: * All active partners: `entityStatus="ENTITY_STATUS_ACTIVE"` The
* length of this field should be no more than 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by partner properties. Supported syntax: * Filter expressions are made up of one
or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence
of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator}
{value}`. * The operator must be `EQUALS (=)`. * Supported fields: - `entityStatus` Examples: * All
active partners: `entityStatus="ENTITY_STATUS_ACTIVE"` The length of this field should be no more
than 500 characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by partner properties. Supported syntax: * Filter expressions are made up
* of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical
* operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form
* of `{field} {operator} {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `entityStatus` Examples: * All active partners: `entityStatus="ENTITY_STATUS_ACTIVE"` The
* length of this field should be no more than 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` The default sorting
* order is ascending. To specify descending order for a field, a suffix "desc" should be
* added to the field name. For example, `displayName desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `displayName` The default sorting order
is ascending. To specify descending order for a field, a suffix "desc" should be added to the field
name. For example, `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` The default sorting
* order is ascending. To specify descending order for a field, a suffix "desc" should be
* added to the field name. For example, `displayName desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListPartners` method. If not
* specified, the first page of results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListPartners` method. If not specified, the
first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to `ListPartners` method. If not
* specified, the first page of results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Channels collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Channels.List request = displayvideo.channels().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Channels channels() {
return new Channels();
}
/**
* The "channels" collection of methods.
*/
public class Channels {
/**
* Creates a new channel. Returns the newly created channel if successful.
*
* Create a request for the method "channels.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the created channel.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
* @return the request
*/
public Create create(java.lang.Long partnerId, com.google.api.services.displayvideo.v1.model.Channel content) throws java.io.IOException {
Create result = new Create(partnerId, content);
initialize(result);
return result;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
private static final String REST_PATH = "v1/partners/{+partnerId}/channels";
private final java.util.regex.Pattern PARTNER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Creates a new channel. Returns the newly created channel if successful.
*
* Create a request for the method "channels.create".
*
         * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param partnerId The ID of the partner that owns the created channel.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
* @since 1.13
*/
protected Create(java.lang.Long partnerId, com.google.api.services.displayvideo.v1.model.Channel content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Channel.class);
this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/** The ID of the partner that owns the created channel. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** The ID of the partner that owns the created channel.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** The ID of the partner that owns the created channel. */
public Create setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
/** The ID of the advertiser that owns the created channel. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** The ID of the advertiser that owns the created channel.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** The ID of the advertiser that owns the created channel. */
public Create setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Gets a channel for a partner or advertiser.
*
* Create a request for the method "channels.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the fetched channel.
* @param channelId Required. The ID of the channel to fetch.
* @return the request
*/
public Get get(java.lang.Long partnerId, java.lang.Long channelId) throws java.io.IOException {
Get result = new Get(partnerId, channelId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
private static final String REST_PATH = "v1/partners/{+partnerId}/channels/{+channelId}";
private final java.util.regex.Pattern PARTNER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a channel for a partner or advertiser.
*
* Create a request for the method "channels.get".
*
         * This request holds the parameters needed by the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param partnerId The ID of the partner that owns the fetched channel.
* @param channelId Required. The ID of the channel to fetch.
* @since 1.13
*/
protected Get(java.lang.Long partnerId, java.lang.Long channelId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Channel.class);
this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
      @Override
      // Covariant override so chained calls keep the Get type.
      public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Get) super.setPrettyPrint(prettyPrint);
      }
      @Override
      // Covariant override so chained calls keep the Get type.
      public Get setQuotaUser(java.lang.String quotaUser) {
        return (Get) super.setQuotaUser(quotaUser);
      }
      @Override
      // Covariant override so chained calls keep the Get type.
      public Get setUploadType(java.lang.String uploadType) {
        return (Get) super.setUploadType(uploadType);
      }
      @Override
      // Covariant override so chained calls keep the Get type.
      public Get setUploadProtocol(java.lang.String uploadProtocol) {
        return (Get) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the partner that owns the fetched channel. */
      // Included in the outgoing request via the @Key annotation.
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the fetched channel. */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the fetched channel. */
      public Get setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      /** Required. The ID of the channel to fetch. */
      // Included in the outgoing request via the @Key annotation.
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Required. The ID of the channel to fetch. */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Required. The ID of the channel to fetch. */
      public Get setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** The ID of the advertiser that owns the fetched channel. */
      // Included in the outgoing request via the @Key annotation.
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the fetched channel. */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the fetched channel. */
      public Get setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      @Override
      // Generic name/value parameter setter; covariant return for chaining.
      public Get set(String parameterName, Object value) {
        return (Get) super.set(parameterName, value);
      }
}
/**
* Lists channels for a partner or advertiser.
*
* Create a request for the method "channels.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the channels.
* @return the request
*/
public List list(java.lang.Long partnerId) throws java.io.IOException {
List result = new List(partnerId);
initialize(result);
return result;
}
    public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListChannelsResponse> {
      private static final String REST_PATH = "v1/partners/{+partnerId}/channels";
      // NOTE(review): generated but unreferenced in this class — presumably emitted for parity with
      // pattern-checked path parameters elsewhere; confirm against the code generator.
      private final java.util.regex.Pattern PARTNER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Lists channels for a partner or advertiser.
       *
       * Create a request for the method "channels.list".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
       * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param partnerId The ID of the partner that owns the channels.
       * @since 1.13
       */
      protected List(java.lang.Long partnerId) {
        super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListChannelsResponse.class);
        this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
      }
      @Override
      public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
        return super.executeUsingHead();
      }
      @Override
      public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
        return super.buildHttpRequestUsingHead();
      }
      @Override
      public List set$Xgafv(java.lang.String $Xgafv) {
        return (List) super.set$Xgafv($Xgafv);
      }
      @Override
      public List setAccessToken(java.lang.String accessToken) {
        return (List) super.setAccessToken(accessToken);
      }
      @Override
      public List setAlt(java.lang.String alt) {
        return (List) super.setAlt(alt);
      }
      @Override
      public List setCallback(java.lang.String callback) {
        return (List) super.setCallback(callback);
      }
      @Override
      public List setFields(java.lang.String fields) {
        return (List) super.setFields(fields);
      }
      @Override
      public List setKey(java.lang.String key) {
        return (List) super.setKey(key);
      }
      @Override
      public List setOauthToken(java.lang.String oauthToken) {
        return (List) super.setOauthToken(oauthToken);
      }
      @Override
      public List setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (List) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public List setQuotaUser(java.lang.String quotaUser) {
        return (List) super.setQuotaUser(quotaUser);
      }
      @Override
      public List setUploadType(java.lang.String uploadType) {
        return (List) super.setUploadType(uploadType);
      }
      @Override
      public List setUploadProtocol(java.lang.String uploadProtocol) {
        return (List) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the partner that owns the channels. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the channels. */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the channels. */
      public List setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      /** The ID of the advertiser that owns the channels. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the channels. */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the channels. */
      public List setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /**
       * Allows filtering by channel fields. Supported syntax: * Filter expressions for channel
       * currently can only contain at most one * restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields:
       * - `displayName` Examples: * All channels for which the display name contains "google":
       * `displayName : "google"`. The length of this field should be no more than 500 characters.
       */
      @com.google.api.client.util.Key
      private java.lang.String filter;
      /**
       * Allows filtering by channel fields. Supported syntax: * Filter expressions for channel
       * currently can only contain at most one * restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields:
       * - `displayName` Examples: * All channels for which the display name contains "google":
       * `displayName : "google"`. The length of this field should be no more than 500 characters.
       */
      public java.lang.String getFilter() {
        return filter;
      }
      /**
       * Allows filtering by channel fields. Supported syntax: * Filter expressions for channel
       * currently can only contain at most one * restriction. * A restriction has the form of
       * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported fields:
       * - `displayName` Examples: * All channels for which the display name contains "google":
       * `displayName : "google"`. The length of this field should be no more than 500 characters.
       */
      public List setFilter(java.lang.String filter) {
        this.filter = filter;
        return this;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `channelId` The default sorting order is ascending. To specify descending order for a
       * field, a suffix " desc" should be added to the field name. Example: `displayName desc`.
       */
      @com.google.api.client.util.Key
      private java.lang.String orderBy;
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `channelId` The default sorting order is ascending. To specify descending order for a
       * field, a suffix " desc" should be added to the field name. Example: `displayName desc`.
       */
      public java.lang.String getOrderBy() {
        return orderBy;
      }
      /**
       * Field by which to sort the list. Acceptable values are: * `displayName` (default) *
       * `channelId` The default sorting order is ascending. To specify descending order for a
       * field, a suffix " desc" should be added to the field name. Example: `displayName desc`.
       */
      public List setOrderBy(java.lang.String orderBy) {
        this.orderBy = orderBy;
        return this;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      @com.google.api.client.util.Key
      private java.lang.Integer pageSize;
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public java.lang.Integer getPageSize() {
        return pageSize;
      }
      /**
       * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
       * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
       */
      public List setPageSize(java.lang.Integer pageSize) {
        this.pageSize = pageSize;
        return this;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListChannels` method. If not
       * specified, the first page of results will be returned.
       */
      @com.google.api.client.util.Key
      private java.lang.String pageToken;
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListChannels` method. If not
       * specified, the first page of results will be returned.
       */
      public java.lang.String getPageToken() {
        return pageToken;
      }
      /**
       * A token identifying a page of results the server should return. Typically, this is the
       * value of next_page_token returned from the previous call to `ListChannels` method. If not
       * specified, the first page of results will be returned.
       */
      public List setPageToken(java.lang.String pageToken) {
        this.pageToken = pageToken;
        return this;
      }
      @Override
      public List set(String parameterName, Object value) {
        return (List) super.set(parameterName, value);
      }
    }
/**
* Updates a channel. Returns the updated channel if successful.
*
* Create a request for the method "channels.patch".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the created channel.
* @param channelId Output only. The unique ID of the channel. Assigned by the system.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
* @return the request
*/
public Patch patch(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Channel content) throws java.io.IOException {
Patch result = new Patch(partnerId, channelId, content);
initialize(result);
return result;
}
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Channel> {
      private static final String REST_PATH = "v1/partners/{+partnerId}/channels/{channelId}";
      // NOTE(review): generated but unreferenced in this class — presumably emitted for parity with
      // pattern-checked path parameters elsewhere; confirm against the code generator.
      private final java.util.regex.Pattern PARTNER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates a channel. Returns the updated channel if successful.
       *
       * Create a request for the method "channels.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param partnerId The ID of the partner that owns the created channel.
       * @param channelId Output only. The unique ID of the channel. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.Channel}
       * @since 1.13
       */
      protected Patch(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Channel content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Channel.class);
        this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
        this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
      }
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      /** The ID of the partner that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long partnerId;
      /** The ID of the partner that owns the created channel. */
      public java.lang.Long getPartnerId() {
        return partnerId;
      }
      /** The ID of the partner that owns the created channel. */
      public Patch setPartnerId(java.lang.Long partnerId) {
        this.partnerId = partnerId;
        return this;
      }
      /** Output only. The unique ID of the channel. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long channelId;
      /** Output only. The unique ID of the channel. Assigned by the system. */
      public java.lang.Long getChannelId() {
        return channelId;
      }
      /** Output only. The unique ID of the channel. Assigned by the system. */
      public Patch setChannelId(java.lang.Long channelId) {
        this.channelId = channelId;
        return this;
      }
      /** The ID of the advertiser that owns the created channel. */
      @com.google.api.client.util.Key
      private java.lang.Long advertiserId;
      /** The ID of the advertiser that owns the created channel. */
      public java.lang.Long getAdvertiserId() {
        return advertiserId;
      }
      /** The ID of the advertiser that owns the created channel. */
      public Patch setAdvertiserId(java.lang.Long advertiserId) {
        this.advertiserId = advertiserId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update. */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      @Override
      public Patch set(String parameterName, Object value) {
        return (Patch) super.set(parameterName, value);
      }
    }
/**
* An accessor for creating requests from the Sites collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Sites.List request = displayvideo.sites().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Sites sites() {
return new Sites();
}
/**
* The "sites" collection of methods.
*/
public class Sites {
/**
* Bulk edits sites under a single channel. The operation will delete the sites provided in
* BulkEditSitesRequest.deleted_sites and then create the sites provided in
* BulkEditSitesRequest.created_sites.
*
* Create a request for the method "sites.bulkEdit".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the sites belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest}
* @return the request
*/
public BulkEdit bulkEdit(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest content) throws java.io.IOException {
BulkEdit result = new BulkEdit(partnerId, channelId, content);
initialize(result);
return result;
}
      public class BulkEdit extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditSitesResponse> {
        private static final String REST_PATH = "v1/partners/{partnerId}/channels/{+channelId}/sites:bulkEdit";
        // NOTE(review): generated but unreferenced in this class — presumably emitted for parity
        // with pattern-checked path parameters elsewhere; confirm against the code generator.
        private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Bulk edits sites under a single channel. The operation will delete the sites provided in
         * BulkEditSitesRequest.deleted_sites and then create the sites provided in
         * BulkEditSitesRequest.created_sites.
         *
         * Create a request for the method "sites.bulkEdit".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link BulkEdit#execute()} method to invoke the remote operation.
         * <p> {@link
         * BulkEdit#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
         * must be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param partnerId The ID of the partner that owns the parent channel.
         * @param channelId Required. The ID of the parent channel to which the sites belong.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest}
         * @since 1.13
         */
        protected BulkEdit(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.BulkEditSitesRequest content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditSitesResponse.class);
          this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
          this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
        }
        @Override
        public BulkEdit set$Xgafv(java.lang.String $Xgafv) {
          return (BulkEdit) super.set$Xgafv($Xgafv);
        }
        @Override
        public BulkEdit setAccessToken(java.lang.String accessToken) {
          return (BulkEdit) super.setAccessToken(accessToken);
        }
        @Override
        public BulkEdit setAlt(java.lang.String alt) {
          return (BulkEdit) super.setAlt(alt);
        }
        @Override
        public BulkEdit setCallback(java.lang.String callback) {
          return (BulkEdit) super.setCallback(callback);
        }
        @Override
        public BulkEdit setFields(java.lang.String fields) {
          return (BulkEdit) super.setFields(fields);
        }
        @Override
        public BulkEdit setKey(java.lang.String key) {
          return (BulkEdit) super.setKey(key);
        }
        @Override
        public BulkEdit setOauthToken(java.lang.String oauthToken) {
          return (BulkEdit) super.setOauthToken(oauthToken);
        }
        @Override
        public BulkEdit setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (BulkEdit) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public BulkEdit setQuotaUser(java.lang.String quotaUser) {
          return (BulkEdit) super.setQuotaUser(quotaUser);
        }
        @Override
        public BulkEdit setUploadType(java.lang.String uploadType) {
          return (BulkEdit) super.setUploadType(uploadType);
        }
        @Override
        public BulkEdit setUploadProtocol(java.lang.String uploadProtocol) {
          return (BulkEdit) super.setUploadProtocol(uploadProtocol);
        }
        /** The ID of the partner that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long partnerId;
        /** The ID of the partner that owns the parent channel. */
        public java.lang.Long getPartnerId() {
          return partnerId;
        }
        /** The ID of the partner that owns the parent channel. */
        public BulkEdit setPartnerId(java.lang.Long partnerId) {
          this.partnerId = partnerId;
          return this;
        }
        /** Required. The ID of the parent channel to which the sites belong. */
        @com.google.api.client.util.Key
        private java.lang.Long channelId;
        /** Required. The ID of the parent channel to which the sites belong. */
        public java.lang.Long getChannelId() {
          return channelId;
        }
        /** Required. The ID of the parent channel to which the sites belong. */
        public BulkEdit setChannelId(java.lang.Long channelId) {
          this.channelId = channelId;
          return this;
        }
        @Override
        public BulkEdit set(String parameterName, Object value) {
          return (BulkEdit) super.set(parameterName, value);
        }
      }
/**
* Creates a site in a channel.
*
* Create a request for the method "sites.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the parent channel.
* @param channelId Required. The ID of the parent channel in which the site will be created.
* @param content the {@link com.google.api.services.displayvideo.v1.model.Site}
* @return the request
*/
public Create create(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Site content) throws java.io.IOException {
Create result = new Create(partnerId, channelId, content);
initialize(result);
return result;
}
      public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Site> {
        private static final String REST_PATH = "v1/partners/{partnerId}/channels/{+channelId}/sites";
        // NOTE(review): generated but unreferenced in this class — presumably emitted for parity
        // with pattern-checked path parameters elsewhere; confirm against the code generator.
        private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Creates a site in a channel.
         *
         * Create a request for the method "sites.create".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
         * <p> {@link
         * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param partnerId The ID of the partner that owns the parent channel.
         * @param channelId Required. The ID of the parent channel in which the site will be created.
         * @param content the {@link com.google.api.services.displayvideo.v1.model.Site}
         * @since 1.13
         */
        protected Create(java.lang.Long partnerId, java.lang.Long channelId, com.google.api.services.displayvideo.v1.model.Site content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Site.class);
          this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
          this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
        }
        @Override
        public Create set$Xgafv(java.lang.String $Xgafv) {
          return (Create) super.set$Xgafv($Xgafv);
        }
        @Override
        public Create setAccessToken(java.lang.String accessToken) {
          return (Create) super.setAccessToken(accessToken);
        }
        @Override
        public Create setAlt(java.lang.String alt) {
          return (Create) super.setAlt(alt);
        }
        @Override
        public Create setCallback(java.lang.String callback) {
          return (Create) super.setCallback(callback);
        }
        @Override
        public Create setFields(java.lang.String fields) {
          return (Create) super.setFields(fields);
        }
        @Override
        public Create setKey(java.lang.String key) {
          return (Create) super.setKey(key);
        }
        @Override
        public Create setOauthToken(java.lang.String oauthToken) {
          return (Create) super.setOauthToken(oauthToken);
        }
        @Override
        public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Create) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Create setQuotaUser(java.lang.String quotaUser) {
          return (Create) super.setQuotaUser(quotaUser);
        }
        @Override
        public Create setUploadType(java.lang.String uploadType) {
          return (Create) super.setUploadType(uploadType);
        }
        @Override
        public Create setUploadProtocol(java.lang.String uploadProtocol) {
          return (Create) super.setUploadProtocol(uploadProtocol);
        }
        /** The ID of the partner that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long partnerId;
        /** The ID of the partner that owns the parent channel. */
        public java.lang.Long getPartnerId() {
          return partnerId;
        }
        /** The ID of the partner that owns the parent channel. */
        public Create setPartnerId(java.lang.Long partnerId) {
          this.partnerId = partnerId;
          return this;
        }
        /** Required. The ID of the parent channel in which the site will be created. */
        @com.google.api.client.util.Key
        private java.lang.Long channelId;
        /** Required. The ID of the parent channel in which the site will be created. */
        public java.lang.Long getChannelId() {
          return channelId;
        }
        /** Required. The ID of the parent channel in which the site will be created. */
        public Create setChannelId(java.lang.Long channelId) {
          this.channelId = channelId;
          return this;
        }
        /** The ID of the advertiser that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** The ID of the advertiser that owns the parent channel. */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** The ID of the advertiser that owns the parent channel. */
        public Create setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        @Override
        public Create set(String parameterName, Object value) {
          return (Create) super.set(parameterName, value);
        }
      }
/**
* Deletes a site from a channel.
*
* Create a request for the method "sites.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the site belongs.
* @param urlOrAppId Required. The URL or app ID of the site to delete.
* @return the request
*/
public Delete delete(java.lang.Long partnerId, java.lang.Long channelId, java.lang.String urlOrAppId) throws java.io.IOException {
Delete result = new Delete(partnerId, channelId, urlOrAppId);
initialize(result);
return result;
}
      public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
        private static final String REST_PATH = "v1/partners/{partnerId}/channels/{+channelId}/sites/{+urlOrAppId}";
        // NOTE(review): CHANNEL_ID_PATTERN is declared but never checked, while URL_OR_APP_ID_PATTERN
        // is enforced below — presumably an artifact of the code generator; confirm before relying on it.
        private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        private final java.util.regex.Pattern URL_OR_APP_ID_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Deletes a site from a channel.
         *
         * Create a request for the method "sites.delete".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
         * <p> {@link
         * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param partnerId The ID of the partner that owns the parent channel.
         * @param channelId Required. The ID of the parent channel to which the site belongs.
         * @param urlOrAppId Required. The URL or app ID of the site to delete.
         * @since 1.13
         */
        protected Delete(java.lang.Long partnerId, java.lang.Long channelId, java.lang.String urlOrAppId) {
          super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
          this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
          this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
          this.urlOrAppId = com.google.api.client.util.Preconditions.checkNotNull(urlOrAppId, "Required parameter urlOrAppId must be specified.");
          // Path-parameter validation can be disabled globally via suppressPatternChecks.
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(URL_OR_APP_ID_PATTERN.matcher(urlOrAppId).matches(),
                "Parameter urlOrAppId must conform to the pattern " +
                "^[^/]+$");
          }
        }
        @Override
        public Delete set$Xgafv(java.lang.String $Xgafv) {
          return (Delete) super.set$Xgafv($Xgafv);
        }
        @Override
        public Delete setAccessToken(java.lang.String accessToken) {
          return (Delete) super.setAccessToken(accessToken);
        }
        @Override
        public Delete setAlt(java.lang.String alt) {
          return (Delete) super.setAlt(alt);
        }
        @Override
        public Delete setCallback(java.lang.String callback) {
          return (Delete) super.setCallback(callback);
        }
        @Override
        public Delete setFields(java.lang.String fields) {
          return (Delete) super.setFields(fields);
        }
        @Override
        public Delete setKey(java.lang.String key) {
          return (Delete) super.setKey(key);
        }
        @Override
        public Delete setOauthToken(java.lang.String oauthToken) {
          return (Delete) super.setOauthToken(oauthToken);
        }
        @Override
        public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Delete) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Delete setQuotaUser(java.lang.String quotaUser) {
          return (Delete) super.setQuotaUser(quotaUser);
        }
        @Override
        public Delete setUploadType(java.lang.String uploadType) {
          return (Delete) super.setUploadType(uploadType);
        }
        @Override
        public Delete setUploadProtocol(java.lang.String uploadProtocol) {
          return (Delete) super.setUploadProtocol(uploadProtocol);
        }
        /** The ID of the partner that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long partnerId;
        /** The ID of the partner that owns the parent channel. */
        public java.lang.Long getPartnerId() {
          return partnerId;
        }
        /** The ID of the partner that owns the parent channel. */
        public Delete setPartnerId(java.lang.Long partnerId) {
          this.partnerId = partnerId;
          return this;
        }
        /** Required. The ID of the parent channel to which the site belongs. */
        @com.google.api.client.util.Key
        private java.lang.Long channelId;
        /** Required. The ID of the parent channel to which the site belongs. */
        public java.lang.Long getChannelId() {
          return channelId;
        }
        /** Required. The ID of the parent channel to which the site belongs. */
        public Delete setChannelId(java.lang.Long channelId) {
          this.channelId = channelId;
          return this;
        }
        /** Required. The URL or app ID of the site to delete. */
        @com.google.api.client.util.Key
        private java.lang.String urlOrAppId;
        /** Required. The URL or app ID of the site to delete. */
        public java.lang.String getUrlOrAppId() {
          return urlOrAppId;
        }
        /** Required. The URL or app ID of the site to delete. */
        public Delete setUrlOrAppId(java.lang.String urlOrAppId) {
          // Same pattern check as the constructor, so late assignment cannot bypass validation.
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(URL_OR_APP_ID_PATTERN.matcher(urlOrAppId).matches(),
                "Parameter urlOrAppId must conform to the pattern " +
                "^[^/]+$");
          }
          this.urlOrAppId = urlOrAppId;
          return this;
        }
        /** The ID of the advertiser that owns the parent channel. */
        @com.google.api.client.util.Key
        private java.lang.Long advertiserId;
        /** The ID of the advertiser that owns the parent channel. */
        public java.lang.Long getAdvertiserId() {
          return advertiserId;
        }
        /** The ID of the advertiser that owns the parent channel. */
        public Delete setAdvertiserId(java.lang.Long advertiserId) {
          this.advertiserId = advertiserId;
          return this;
        }
        @Override
        public Delete set(String parameterName, Object value) {
          return (Delete) super.set(parameterName, value);
        }
      }
/**
* Lists sites in a channel.
*
* Create a request for the method "sites.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param partnerId The ID of the partner that owns the parent channel.
* @param channelId Required. The ID of the parent channel to which the requested sites belong.
* @return the request
*/
public List list(java.lang.Long partnerId, java.lang.Long channelId) throws java.io.IOException {
List result = new List(partnerId, channelId);
initialize(result);
return result;
}
        public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListSitesResponse> {

          private static final String REST_PATH = "v1/partners/{+partnerId}/channels/{+channelId}/sites";
          // NOTE(review): the two Pattern constants below are emitted by the code generator but are
          // never applied in this class — both path parameters are Longs, so no pattern check is
          // generated for them.
          private final java.util.regex.Pattern PARTNER_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern CHANNEL_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          /**
           * Lists sites in a channel.
           *
           * Create a request for the method "sites.list".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
           * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
           * must be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param partnerId The ID of the partner that owns the parent channel.
           * @param channelId Required. The ID of the parent channel to which the requested sites belong.
           * @since 1.13
           */
          protected List(java.lang.Long partnerId, java.lang.Long channelId) {
            super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListSitesResponse.class);
            this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
            this.channelId = com.google.api.client.util.Preconditions.checkNotNull(channelId, "Required parameter channelId must be specified.");
          }

          @Override
          public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
            return super.executeUsingHead();
          }

          @Override
          public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
            return super.buildHttpRequestUsingHead();
          }

          @Override
          public List set$Xgafv(java.lang.String $Xgafv) {
            return (List) super.set$Xgafv($Xgafv);
          }

          @Override
          public List setAccessToken(java.lang.String accessToken) {
            return (List) super.setAccessToken(accessToken);
          }

          @Override
          public List setAlt(java.lang.String alt) {
            return (List) super.setAlt(alt);
          }

          @Override
          public List setCallback(java.lang.String callback) {
            return (List) super.setCallback(callback);
          }

          @Override
          public List setFields(java.lang.String fields) {
            return (List) super.setFields(fields);
          }

          @Override
          public List setKey(java.lang.String key) {
            return (List) super.setKey(key);
          }

          @Override
          public List setOauthToken(java.lang.String oauthToken) {
            return (List) super.setOauthToken(oauthToken);
          }

          @Override
          public List setPrettyPrint(java.lang.Boolean prettyPrint) {
            return (List) super.setPrettyPrint(prettyPrint);
          }

          @Override
          public List setQuotaUser(java.lang.String quotaUser) {
            return (List) super.setQuotaUser(quotaUser);
          }

          @Override
          public List setUploadType(java.lang.String uploadType) {
            return (List) super.setUploadType(uploadType);
          }

          @Override
          public List setUploadProtocol(java.lang.String uploadProtocol) {
            return (List) super.setUploadProtocol(uploadProtocol);
          }

          /** The ID of the partner that owns the parent channel. */
          @com.google.api.client.util.Key
          private java.lang.Long partnerId;

          /** The ID of the partner that owns the parent channel.
         */
          public java.lang.Long getPartnerId() {
            return partnerId;
          }

          /** The ID of the partner that owns the parent channel. */
          public List setPartnerId(java.lang.Long partnerId) {
            this.partnerId = partnerId;
            return this;
          }

          /** Required. The ID of the parent channel to which the requested sites belong. */
          @com.google.api.client.util.Key
          private java.lang.Long channelId;

          /** Required. The ID of the parent channel to which the requested sites belong.
         */
          public java.lang.Long getChannelId() {
            return channelId;
          }

          /** Required. The ID of the parent channel to which the requested sites belong. */
          public List setChannelId(java.lang.Long channelId) {
            this.channelId = channelId;
            return this;
          }

          /** The ID of the advertiser that owns the parent channel. */
          @com.google.api.client.util.Key
          private java.lang.Long advertiserId;

          /** The ID of the advertiser that owns the parent channel.
         */
          public java.lang.Long getAdvertiserId() {
            return advertiserId;
          }

          /** The ID of the advertiser that owns the parent channel. */
          public List setAdvertiserId(java.lang.Long advertiserId) {
            this.advertiserId = advertiserId;
            return this;
          }

          /**
           * Allows filtering by site fields. Supported syntax: * Filter expressions for site
           * currently can only contain at most one * restriction. * A restriction has the form of
           * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported
           * fields: - `urlOrAppId` Examples: * All sites for which the URL or app ID contains
           * "google": `urlOrAppId : "google"`
           */
          @com.google.api.client.util.Key
          private java.lang.String filter;

          /** Allows filtering by site fields. Supported syntax: * Filter expressions for site currently can only
         contain at most one * restriction. * A restriction has the form of `{field} {operator} {value}`. *
         The operator must be `CONTAINS (:)`. * Supported fields: - `urlOrAppId` Examples: * All sites for
         which the URL or app ID contains "google": `urlOrAppId : "google"`
         */
          public java.lang.String getFilter() {
            return filter;
          }

          /**
           * Allows filtering by site fields. Supported syntax: * Filter expressions for site
           * currently can only contain at most one * restriction. * A restriction has the form of
           * `{field} {operator} {value}`. * The operator must be `CONTAINS (:)`. * Supported
           * fields: - `urlOrAppId` Examples: * All sites for which the URL or app ID contains
           * "google": `urlOrAppId : "google"`
           */
          public List setFilter(java.lang.String filter) {
            this.filter = filter;
            return this;
          }

          /**
           * Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The
           * default sorting order is ascending. To specify descending order for a field, a suffix "
           * desc" should be added to the field name. Example: `urlOrAppId desc`.
           */
          @com.google.api.client.util.Key
          private java.lang.String orderBy;

          /** Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The default
         sorting order is ascending. To specify descending order for a field, a suffix " desc" should be
         added to the field name. Example: `urlOrAppId desc`.
         */
          public java.lang.String getOrderBy() {
            return orderBy;
          }

          /**
           * Field by which to sort the list. Acceptable values are: * `urlOrAppId` (default) The
           * default sorting order is ascending. To specify descending order for a field, a suffix "
           * desc" should be added to the field name. Example: `urlOrAppId desc`.
           */
          public List setOrderBy(java.lang.String orderBy) {
            this.orderBy = orderBy;
            return this;
          }

          /**
           * Requested page size. Must be between `1` and `100`. If unspecified will default to
           * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
           */
          @com.google.api.client.util.Key
          private java.lang.Integer pageSize;

          /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
         error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
          public java.lang.Integer getPageSize() {
            return pageSize;
          }

          /**
           * Requested page size. Must be between `1` and `100`. If unspecified will default to
           * `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
           */
          public List setPageSize(java.lang.Integer pageSize) {
            this.pageSize = pageSize;
            return this;
          }

          /**
           * A token identifying a page of results the server should return. Typically, this is the
           * value of next_page_token returned from the previous call to `ListSites` method. If not
           * specified, the first page of results will be returned.
           */
          @com.google.api.client.util.Key
          private java.lang.String pageToken;

          /** A token identifying a page of results the server should return. Typically, this is the value of
         next_page_token returned from the previous call to `ListSites` method. If not specified, the first
         page of results will be returned.
         */
          public java.lang.String getPageToken() {
            return pageToken;
          }

          /**
           * A token identifying a page of results the server should return. Typically, this is the
           * value of next_page_token returned from the previous call to `ListSites` method. If not
           * specified, the first page of results will be returned.
           */
          public List setPageToken(java.lang.String pageToken) {
            this.pageToken = pageToken;
            return this;
          }

          @Override
          public List set(String parameterName, Object value) {
            return (List) super.set(parameterName, value);
          }
        }
}
}
/**
* An accessor for creating requests from the TargetingTypes collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.TargetingTypes.List request = displayvideo.targetingTypes().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public TargetingTypes targetingTypes() {
return new TargetingTypes();
}
/**
* The "targetingTypes" collection of methods.
*/
public class TargetingTypes {
/**
* An accessor for creating requests from the AssignedTargetingOptions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.AssignedTargetingOptions.List request = displayvideo.assignedTargetingOptions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public AssignedTargetingOptions assignedTargetingOptions() {
return new AssignedTargetingOptions();
}
/**
* The "assignedTargetingOptions" collection of methods.
*/
public class AssignedTargetingOptions {
/**
* Assigns a targeting option to a partner. Returns the assigned targeting option if successful.
*
* Create a request for the method "assignedTargetingOptions.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param partnerId Required. The ID of the partner.
* @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
* `TARGETING_TYPE_CHANNEL`
* @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
* @return the request
*/
public Create create(java.lang.Long partnerId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) throws java.io.IOException {
Create result = new Create(partnerId, targetingType, content);
initialize(result);
return result;
}
        public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {

          private static final String REST_PATH = "v1/partners/{+partnerId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
          // NOTE(review): PARTNER_ID_PATTERN is generated but never applied — partnerId is a Long,
          // so no pattern check is emitted for it. TARGETING_TYPE_PATTERN is enforced below.
          private final java.util.regex.Pattern PARTNER_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          /**
           * Assigns a targeting option to a partner. Returns the assigned targeting option if successful.
           *
           * Create a request for the method "assignedTargetingOptions.create".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
           * <p> {@link
           * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
           * be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param partnerId Required. The ID of the partner.
           * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
           *        `TARGETING_TYPE_CHANNEL`
           * @param content the {@link com.google.api.services.displayvideo.v1.model.AssignedTargetingOption}
           * @since 1.13
           */
          protected Create(java.lang.Long partnerId, java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption content) {
            super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
            this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
            this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
          }

          @Override
          public Create set$Xgafv(java.lang.String $Xgafv) {
            return (Create) super.set$Xgafv($Xgafv);
          }

          @Override
          public Create setAccessToken(java.lang.String accessToken) {
            return (Create) super.setAccessToken(accessToken);
          }

          @Override
          public Create setAlt(java.lang.String alt) {
            return (Create) super.setAlt(alt);
          }

          @Override
          public Create setCallback(java.lang.String callback) {
            return (Create) super.setCallback(callback);
          }

          @Override
          public Create setFields(java.lang.String fields) {
            return (Create) super.setFields(fields);
          }

          @Override
          public Create setKey(java.lang.String key) {
            return (Create) super.setKey(key);
          }

          @Override
          public Create setOauthToken(java.lang.String oauthToken) {
            return (Create) super.setOauthToken(oauthToken);
          }

          @Override
          public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
            return (Create) super.setPrettyPrint(prettyPrint);
          }

          @Override
          public Create setQuotaUser(java.lang.String quotaUser) {
            return (Create) super.setQuotaUser(quotaUser);
          }

          @Override
          public Create setUploadType(java.lang.String uploadType) {
            return (Create) super.setUploadType(uploadType);
          }

          @Override
          public Create setUploadProtocol(java.lang.String uploadProtocol) {
            return (Create) super.setUploadProtocol(uploadProtocol);
          }

          /** Required. The ID of the partner. */
          @com.google.api.client.util.Key
          private java.lang.Long partnerId;

          /** Required. The ID of the partner.
         */
          public java.lang.Long getPartnerId() {
            return partnerId;
          }

          /** Required. The ID of the partner. */
          public Create setPartnerId(java.lang.Long partnerId) {
            this.partnerId = partnerId;
            return this;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          @com.google.api.client.util.Key
          private java.lang.String targetingType;

          /** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
         `TARGETING_TYPE_CHANNEL`
         */
          public java.lang.String getTargetingType() {
            return targetingType;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          public Create setTargetingType(java.lang.String targetingType) {
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
            this.targetingType = targetingType;
            return this;
          }

          @Override
          public Create set(String parameterName, Object value) {
            return (Create) super.set(parameterName, value);
          }
        }
/**
* Deletes an assigned targeting option from a partner.
*
* Create a request for the method "assignedTargetingOptions.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param partnerId Required. The ID of the partner.
* @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
* `TARGETING_TYPE_CHANNEL`
* @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
* @return the request
*/
public Delete delete(java.lang.Long partnerId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
Delete result = new Delete(partnerId, targetingType, assignedTargetingOptionId);
initialize(result);
return result;
}
        public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {

          private static final String REST_PATH = "v1/partners/{+partnerId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
          // NOTE(review): PARTNER_ID_PATTERN is generated but never applied — partnerId is a Long,
          // so no pattern check is emitted for it. The other two patterns are enforced below.
          private final java.util.regex.Pattern PARTNER_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          /**
           * Deletes an assigned targeting option from a partner.
           *
           * Create a request for the method "assignedTargetingOptions.delete".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
           * <p> {@link
           * Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
           * be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param partnerId Required. The ID of the partner.
           * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
           *        `TARGETING_TYPE_CHANNEL`
           * @param assignedTargetingOptionId Required. The ID of the assigned targeting option to delete.
           * @since 1.13
           */
          protected Delete(java.lang.Long partnerId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
            super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
            this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
            this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
            this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                  "Parameter assignedTargetingOptionId must conform to the pattern " +
                  "^[^/]+$");
            }
          }

          @Override
          public Delete set$Xgafv(java.lang.String $Xgafv) {
            return (Delete) super.set$Xgafv($Xgafv);
          }

          @Override
          public Delete setAccessToken(java.lang.String accessToken) {
            return (Delete) super.setAccessToken(accessToken);
          }

          @Override
          public Delete setAlt(java.lang.String alt) {
            return (Delete) super.setAlt(alt);
          }

          @Override
          public Delete setCallback(java.lang.String callback) {
            return (Delete) super.setCallback(callback);
          }

          @Override
          public Delete setFields(java.lang.String fields) {
            return (Delete) super.setFields(fields);
          }

          @Override
          public Delete setKey(java.lang.String key) {
            return (Delete) super.setKey(key);
          }

          @Override
          public Delete setOauthToken(java.lang.String oauthToken) {
            return (Delete) super.setOauthToken(oauthToken);
          }

          @Override
          public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
            return (Delete) super.setPrettyPrint(prettyPrint);
          }

          @Override
          public Delete setQuotaUser(java.lang.String quotaUser) {
            return (Delete) super.setQuotaUser(quotaUser);
          }

          @Override
          public Delete setUploadType(java.lang.String uploadType) {
            return (Delete) super.setUploadType(uploadType);
          }

          @Override
          public Delete setUploadProtocol(java.lang.String uploadProtocol) {
            return (Delete) super.setUploadProtocol(uploadProtocol);
          }

          /** Required. The ID of the partner. */
          @com.google.api.client.util.Key
          private java.lang.Long partnerId;

          /** Required. The ID of the partner.
         */
          public java.lang.Long getPartnerId() {
            return partnerId;
          }

          /** Required. The ID of the partner. */
          public Delete setPartnerId(java.lang.Long partnerId) {
            this.partnerId = partnerId;
            return this;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          @com.google.api.client.util.Key
          private java.lang.String targetingType;

          /** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
         `TARGETING_TYPE_CHANNEL`
         */
          public java.lang.String getTargetingType() {
            return targetingType;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          public Delete setTargetingType(java.lang.String targetingType) {
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
            this.targetingType = targetingType;
            return this;
          }

          /** Required. The ID of the assigned targeting option to delete. */
          @com.google.api.client.util.Key
          private java.lang.String assignedTargetingOptionId;

          /** Required. The ID of the assigned targeting option to delete.
         */
          public java.lang.String getAssignedTargetingOptionId() {
            return assignedTargetingOptionId;
          }

          /** Required. The ID of the assigned targeting option to delete. */
          public Delete setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                  "Parameter assignedTargetingOptionId must conform to the pattern " +
                  "^[^/]+$");
            }
            this.assignedTargetingOptionId = assignedTargetingOptionId;
            return this;
          }

          @Override
          public Delete set(String parameterName, Object value) {
            return (Delete) super.set(parameterName, value);
          }
        }
/**
* Gets a single targeting option assigned to a partner.
*
* Create a request for the method "assignedTargetingOptions.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param partnerId Required. The ID of the partner.
* @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
* `TARGETING_TYPE_CHANNEL`
* @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this partner that identifies the assigned
* targeting option being requested.
* @return the request
*/
public Get get(java.lang.Long partnerId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) throws java.io.IOException {
Get result = new Get(partnerId, targetingType, assignedTargetingOptionId);
initialize(result);
return result;
}
        public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.AssignedTargetingOption> {

          private static final String REST_PATH = "v1/partners/{+partnerId}/targetingTypes/{+targetingType}/assignedTargetingOptions/{+assignedTargetingOptionId}";
          // NOTE(review): PARTNER_ID_PATTERN is generated but never applied — partnerId is a Long,
          // so no pattern check is emitted for it. The other two patterns are enforced below.
          private final java.util.regex.Pattern PARTNER_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          private final java.util.regex.Pattern ASSIGNED_TARGETING_OPTION_ID_PATTERN =
              java.util.regex.Pattern.compile("^[^/]+$");

          /**
           * Gets a single targeting option assigned to a partner.
           *
           * Create a request for the method "assignedTargetingOptions.get".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
           * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
           * must be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param partnerId Required. The ID of the partner.
           * @param targetingType Required. Identifies the type of this assigned targeting option. Supported targeting types: *
           *        `TARGETING_TYPE_CHANNEL`
           * @param assignedTargetingOptionId Required. An identifier unique to the targeting type in this partner that identifies the assigned
           *        targeting option being requested.
           * @since 1.13
           */
          protected Get(java.lang.Long partnerId, java.lang.String targetingType, java.lang.String assignedTargetingOptionId) {
            super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.AssignedTargetingOption.class);
            this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
            this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
            this.assignedTargetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(assignedTargetingOptionId, "Required parameter assignedTargetingOptionId must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                  "Parameter assignedTargetingOptionId must conform to the pattern " +
                  "^[^/]+$");
            }
          }

          @Override
          public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
            return super.executeUsingHead();
          }

          @Override
          public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
            return super.buildHttpRequestUsingHead();
          }

          @Override
          public Get set$Xgafv(java.lang.String $Xgafv) {
            return (Get) super.set$Xgafv($Xgafv);
          }

          @Override
          public Get setAccessToken(java.lang.String accessToken) {
            return (Get) super.setAccessToken(accessToken);
          }

          @Override
          public Get setAlt(java.lang.String alt) {
            return (Get) super.setAlt(alt);
          }

          @Override
          public Get setCallback(java.lang.String callback) {
            return (Get) super.setCallback(callback);
          }

          @Override
          public Get setFields(java.lang.String fields) {
            return (Get) super.setFields(fields);
          }

          @Override
          public Get setKey(java.lang.String key) {
            return (Get) super.setKey(key);
          }

          @Override
          public Get setOauthToken(java.lang.String oauthToken) {
            return (Get) super.setOauthToken(oauthToken);
          }

          @Override
          public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
            return (Get) super.setPrettyPrint(prettyPrint);
          }

          @Override
          public Get setQuotaUser(java.lang.String quotaUser) {
            return (Get) super.setQuotaUser(quotaUser);
          }

          @Override
          public Get setUploadType(java.lang.String uploadType) {
            return (Get) super.setUploadType(uploadType);
          }

          @Override
          public Get setUploadProtocol(java.lang.String uploadProtocol) {
            return (Get) super.setUploadProtocol(uploadProtocol);
          }

          /** Required. The ID of the partner. */
          @com.google.api.client.util.Key
          private java.lang.Long partnerId;

          /** Required. The ID of the partner.
         */
          public java.lang.Long getPartnerId() {
            return partnerId;
          }

          /** Required. The ID of the partner. */
          public Get setPartnerId(java.lang.Long partnerId) {
            this.partnerId = partnerId;
            return this;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          @com.google.api.client.util.Key
          private java.lang.String targetingType;

          /** Required. Identifies the type of this assigned targeting option. Supported targeting types: *
         `TARGETING_TYPE_CHANNEL`
         */
          public java.lang.String getTargetingType() {
            return targetingType;
          }

          /**
           * Required. Identifies the type of this assigned targeting option. Supported targeting
           * types: * `TARGETING_TYPE_CHANNEL`
           */
          public Get setTargetingType(java.lang.String targetingType) {
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
            this.targetingType = targetingType;
            return this;
          }

          /**
           * Required. An identifier unique to the targeting type in this partner that identifies
           * the assigned targeting option being requested.
           */
          @com.google.api.client.util.Key
          private java.lang.String assignedTargetingOptionId;

          /** Required. An identifier unique to the targeting type in this partner that identifies the assigned
         targeting option being requested.
         */
          public java.lang.String getAssignedTargetingOptionId() {
            return assignedTargetingOptionId;
          }

          /**
           * Required. An identifier unique to the targeting type in this partner that identifies
           * the assigned targeting option being requested.
           */
          public Get setAssignedTargetingOptionId(java.lang.String assignedTargetingOptionId) {
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(ASSIGNED_TARGETING_OPTION_ID_PATTERN.matcher(assignedTargetingOptionId).matches(),
                  "Parameter assignedTargetingOptionId must conform to the pattern " +
                  "^[^/]+$");
            }
            this.assignedTargetingOptionId = assignedTargetingOptionId;
            return this;
          }

          @Override
          public Get set(String parameterName, Object value) {
            return (Get) super.set(parameterName, value);
          }
        }
/**
* Lists the targeting options assigned to a partner.
*
* Create a request for the method "assignedTargetingOptions.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param partnerId Required. The ID of the partner.
* @param targetingType Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
* `TARGETING_TYPE_CHANNEL`
* @return the request
*/
public List list(java.lang.Long partnerId, java.lang.String targetingType) throws java.io.IOException {
List result = new List(partnerId, targetingType);
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListPartnerAssignedTargetingOptionsResponse> {
private static final String REST_PATH = "v1/partners/{+partnerId}/targetingTypes/{+targetingType}/assignedTargetingOptions";
private final java.util.regex.Pattern PARTNER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
          /**
           * Lists the targeting options assigned to a partner.
           *
           * Create a request for the method "assignedTargetingOptions.list".
           *
           * This request holds the parameters needed by the displayvideo server. After setting any
           * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
           * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
           * must be called to initialize this instance immediately after invoking the constructor. </p>
           *
           * @param partnerId Required. The ID of the partner.
           * @param targetingType Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
           *        `TARGETING_TYPE_CHANNEL`
           * @since 1.13
           */
          protected List(java.lang.Long partnerId, java.lang.String targetingType) {
            super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListPartnerAssignedTargetingOptionsResponse.class);
            this.partnerId = com.google.api.client.util.Preconditions.checkNotNull(partnerId, "Required parameter partnerId must be specified.");
            this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
            if (!getSuppressPatternChecks()) {
              com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                  "Parameter targetingType must conform to the pattern " +
                  "^[^/]+$");
            }
          }
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the partner. */
@com.google.api.client.util.Key
private java.lang.Long partnerId;
/** Required. The ID of the partner.
*/
public java.lang.Long getPartnerId() {
return partnerId;
}
/** Required. The ID of the partner. */
public List setPartnerId(java.lang.Long partnerId) {
this.partnerId = partnerId;
return this;
}
/**
* Required. Identifies the type of assigned targeting options to list. Supported
* targeting types: * `TARGETING_TYPE_CHANNEL`
*/
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. Identifies the type of assigned targeting options to list. Supported targeting types: *
`TARGETING_TYPE_CHANNEL`
*/
public java.lang.String getTargetingType() {
return targetingType;
}
/**
* Required. Identifies the type of assigned targeting options to list. Supported
* targeting types: * `TARGETING_TYPE_CHANNEL`
*/
public List setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/**
* Allows filtering by assigned targeting option properties. Supported syntax: * Filter
* expressions are made up of one or more restrictions. * Restrictions can be combined by
* the logical operator `OR`. * A restriction has the form of `{field} {operator}
* {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID 123456
* `assignedTargetingOptionId="123456"` The length of this field should be no more than
* 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by assigned targeting option properties. Supported syntax: * Filter expressions
are made up of one or more restrictions. * Restrictions can be combined by the logical operator
`OR`. * A restriction has the form of `{field} {operator} {value}`. * The operator must be `EQUALS
(=)`. * Supported fields: - `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID
123456 `assignedTargetingOptionId="123456"` The length of this field should be no more than 500
characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by assigned targeting option properties. Supported syntax: * Filter
* expressions are made up of one or more restrictions. * Restrictions can be combined by
* the logical operator `OR`. * A restriction has the form of `{field} {operator}
* {value}`. * The operator must be `EQUALS (=)`. * Supported fields: -
* `assignedTargetingOptionId` Examples: * AssignedTargetingOption with ID 123456
* `assignedTargetingOptionId="123456"` The length of this field should be no more than
* 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
* (default) The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example:
* `assignedTargetingOptionId desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId` (default) The
default sorting order is ascending. To specify descending order for a field, a suffix "desc" should
be added to the field name. Example: `assignedTargetingOptionId desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `assignedTargetingOptionId`
* (default) The default sorting order is ascending. To specify descending order for a
* field, a suffix "desc" should be added to the field name. Example:
* `assignedTargetingOptionId desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to
* `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to
* `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListPartnerAssignedTargetingOptions` method. If not specified, the first page of
* results will be returned.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A token identifying a page of results the server should return. Typically, this is the value of
next_page_token returned from the previous call to `ListPartnerAssignedTargetingOptions` method. If
not specified, the first page of results will be returned.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A token identifying a page of results the server should return. Typically, this is the
* value of next_page_token returned from the previous call to
* `ListPartnerAssignedTargetingOptions` method. If not specified, the first page of
* results will be returned.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
}
/**
 * An accessor for creating requests from the Sdfdownloadtasks collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.Sdfdownloadtasks.List request = displayvideo.sdfdownloadtasks().list(parameters ...)}
 * </pre>
 *
 * @return a new instance of the resource collection
 */
public Sdfdownloadtasks sdfdownloadtasks() {
  Sdfdownloadtasks collection = new Sdfdownloadtasks();
  return collection;
}
/**
 * The "sdfdownloadtasks" collection of methods.
 */
public class Sdfdownloadtasks {
/**
 * Creates an SDF Download Task. Returns an Operation. An SDF Download Task is a long-running,
 * asynchronous operation. The metadata type of this operation is SdfDownloadTaskMetadata. If the
 * request is successful, the response type of the operation is SdfDownloadTask. The response will
 * not include the download files, which must be retrieved with media.download. The state of
 * operation can be retrieved with sdfdownloadtask.operations.get. Any errors can be found in the
 * error.message. Note that error.details is expected to be empty.
 *
 * Create a request for the method "sdfdownloadtasks.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Create#execute()} method to invoke the remote operation.
 *
 * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateSdfDownloadTaskRequest}
 * @return the request
 */
public Create create(com.google.api.services.displayvideo.v1.model.CreateSdfDownloadTaskRequest content) throws java.io.IOException {
Create result = new Create(content);
initialize(result);
return result;
}
public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Operation> {
private static final String REST_PATH = "v1/sdfdownloadtasks";
/**
 * Creates an SDF Download Task. Returns an Operation. An SDF Download Task is a long-running,
 * asynchronous operation. The metadata type of this operation is SdfDownloadTaskMetadata. If the
 * request is successful, the response type of the operation is SdfDownloadTask. The response will
 * not include the download files, which must be retrieved with media.download. The state of
 * operation can be retrieved with sdfdownloadtask.operations.get. Any errors can be found in the
 * error.message. Note that error.details is expected to be empty.
 *
 * Create a request for the method "sdfdownloadtasks.create".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
 * <p> {@link
 * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
 * be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param content the {@link com.google.api.services.displayvideo.v1.model.CreateSdfDownloadTaskRequest}
 * @since 1.13
 */
protected Create(com.google.api.services.displayvideo.v1.model.CreateSdfDownloadTaskRequest content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.Operation.class);
}
// Standard query-parameter overrides below: each delegates to the superclass and narrows the
// return type to Create so calls can be chained fluently.
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
 * An accessor for creating requests from the Operations collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.Operations.List request = displayvideo.operations().list(parameters ...)}
 * </pre>
 *
 * @return the resource collection
 */
public Operations operations() {
return new Operations();
}
/**
 * The "operations" collection of methods.
 */
public class Operations {
/**
 * Gets the latest state of an asynchronous SDF download task operation. Clients should poll this
 * method at intervals of 30 seconds.
 *
 * Create a request for the method "operations.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any optional
 * parameters, call the {@link Get#execute()} method to invoke the remote operation.
 *
 * @param name The name of the operation resource.
 * @return the request
 */
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Operation> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^sdfdownloadtasks/operations/[^/]+$");
/**
 * Gets the latest state of an asynchronous SDF download task operation. Clients should poll this
 * method at intervals of 30 seconds.
 *
 * Create a request for the method "operations.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
 * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param name The name of the operation resource.
 * @since 1.13
 */
protected Get(java.lang.String name) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^sdfdownloadtasks/operations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
// Standard query-parameter overrides below: each delegates to the superclass and narrows the
// return type to Get so calls can be chained fluently.
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource.
 */
public java.lang.String getName() {
return name;
}
/** The name of the operation resource. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^sdfdownloadtasks/operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
}
}
/**
 * An accessor for creating requests from the TargetingTypes collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.TargetingTypes.List request = displayvideo.targetingTypes().list(parameters ...)}
 * </pre>
 *
 * @return a new instance of the resource collection
 */
public TargetingTypes targetingTypes() {
  TargetingTypes collection = new TargetingTypes();
  return collection;
}
/**
* The "targetingTypes" collection of methods.
*/
public class TargetingTypes {
/**
 * An accessor for creating requests from the TargetingOptions collection.
 *
 * <p>The typical use is:</p>
 * <pre>
 *   {@code DisplayVideo displayvideo = new DisplayVideo(...);}
 *   {@code DisplayVideo.TargetingOptions.List request = displayvideo.targetingOptions().list(parameters ...)}
 * </pre>
 *
 * @return a new instance of the resource collection
 */
public TargetingOptions targetingOptions() {
  TargetingOptions collection = new TargetingOptions();
  return collection;
}
/**
* The "targetingOptions" collection of methods.
*/
public class TargetingOptions {
/**
 * Gets a single targeting option.
 *
 * Builds a request for the method "targetingOptions.get". Once any optional parameters
 * have been set on the returned request, invoke {@link Get#execute()} to perform the
 * remote call against the displayvideo server.
 *
 * @param targetingType Required. The type of targeting option to retrieve.
 * @param targetingOptionId Required. The ID of the targeting option to retrieve.
 * @return the initialized request, ready for optional parameters and execution
 */
public Get get(java.lang.String targetingType, java.lang.String targetingOptionId) throws java.io.IOException {
  Get request = new Get(targetingType, targetingOptionId);
  initialize(request);
  return request;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.TargetingOption> {
private static final String REST_PATH = "v1/targetingTypes/{+targetingType}/targetingOptions/{+targetingOptionId}";
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
private final java.util.regex.Pattern TARGETING_OPTION_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Gets a single targeting option.
 *
 * Create a request for the method "targetingOptions.get".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
 * {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param targetingType Required. The type of targeting option to retrieve.
 * @param targetingOptionId Required. The ID of the targeting option to retrieve.
 * @since 1.13
 */
protected Get(java.lang.String targetingType, java.lang.String targetingOptionId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.TargetingOption.class);
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingOptionId = com.google.api.client.util.Preconditions.checkNotNull(targetingOptionId, "Required parameter targetingOptionId must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_OPTION_ID_PATTERN.matcher(targetingOptionId).matches(),
"Parameter targetingOptionId must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
// Standard query-parameter overrides below: each delegates to the superclass and narrows the
// return type to Get so calls can be chained fluently.
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The type of targeting option to retrieve. */
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. The type of targeting option to retrieve.
 */
public java.lang.String getTargetingType() {
return targetingType;
}
/** Required. The type of targeting option to retrieve. */
public Get setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/** Required. The ID of the targeting option to retrieve. */
@com.google.api.client.util.Key
private java.lang.String targetingOptionId;
/** Required. The ID of the targeting option to retrieve.
 */
public java.lang.String getTargetingOptionId() {
return targetingOptionId;
}
/** Required. The ID of the targeting option to retrieve. */
public Get setTargetingOptionId(java.lang.String targetingOptionId) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_OPTION_ID_PATTERN.matcher(targetingOptionId).matches(),
"Parameter targetingOptionId must conform to the pattern " +
"^[^/]+$");
}
this.targetingOptionId = targetingOptionId;
return this;
}
/** Required. The Advertiser this request is being made in the context of. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The Advertiser this request is being made in the context of.
 */
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The Advertiser this request is being made in the context of. */
// NOTE(review): documented "Required." but not enforced in the constructor — presumably validated
// server-side; confirm against the Display & Video 360 API reference.
public Get setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
 * Lists targeting options of a given type.
 *
 * Builds a request for the method "targetingOptions.list". Once any optional parameters
 * have been set on the returned request, invoke {@link List#execute()} to perform the
 * remote call against the displayvideo server.
 *
 * @param targetingType Required. The type of targeting option to be listed.
 * @return the initialized request, ready for optional parameters and execution
 */
public List list(java.lang.String targetingType) throws java.io.IOException {
  List request = new List(targetingType);
  initialize(request);
  return request;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListTargetingOptionsResponse> {
private static final String REST_PATH = "v1/targetingTypes/{+targetingType}/targetingOptions";
private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
 * Lists targeting options of a given type.
 *
 * Create a request for the method "targetingOptions.list".
 *
 * This request holds the parameters needed by the displayvideo server. After setting any
 * optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
 * {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
 *
 * @param targetingType Required. The type of targeting option to be listed.
 * @since 1.13
 */
protected List(java.lang.String targetingType) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListTargetingOptionsResponse.class);
this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The type of targeting option to be listed. */
@com.google.api.client.util.Key
private java.lang.String targetingType;
/** Required. The type of targeting option to be listed.
*/
public java.lang.String getTargetingType() {
return targetingType;
}
/** Required. The type of targeting option to be listed. */
public List setTargetingType(java.lang.String targetingType) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
"Parameter targetingType must conform to the pattern " +
"^[^/]+$");
}
this.targetingType = targetingType;
return this;
}
/** Required. The Advertiser this request is being made in the context of. */
@com.google.api.client.util.Key
private java.lang.Long advertiserId;
/** Required. The Advertiser this request is being made in the context of.
*/
public java.lang.Long getAdvertiserId() {
return advertiserId;
}
/** Required. The Advertiser this request is being made in the context of. */
public List setAdvertiserId(java.lang.Long advertiserId) {
this.advertiserId = advertiserId;
return this;
}
/**
* Allows filtering by targeting option properties. Supported syntax: * Filter expressions
* are made up of one or more restrictions. * Restrictions can be combined by `OR` logical
* operators. * A restriction has the form of `{field} {operator} {value}`. * The operator
* must be "=" (equal sign). * Supported fields: - `carrierAndIspDetails.type` -
* `geoRegionDetails.geoRegionType` - `targetingOptionId` Examples: * All `GEO REGION`
* targeting options that belong to sub type `GEO_REGION_TYPE_COUNTRY` or
* `GEO_REGION_TYPE_STATE`: `geoRegionDetails.geoRegionType="GEO_REGION_TYPE_COUNTRY" OR
* geoRegionDetails.geoRegionType="GEO_REGION_TYPE_STATE"` * All `CARRIER AND ISP` targeting
* options that belong to sub type `CARRIER_AND_ISP_TYPE_CARRIER`:
* `carrierAndIspDetails.type="CARRIER_AND_ISP_TYPE_CARRIER"`. The length of this field
* should be no more than 500 characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by targeting option properties. Supported syntax: * Filter expressions are made up
of one or more restrictions. * Restrictions can be combined by `OR` logical operators. * A
restriction has the form of `{field} {operator} {value}`. * The operator must be "=" (equal sign).
* Supported fields: - `carrierAndIspDetails.type` - `geoRegionDetails.geoRegionType` -
`targetingOptionId` Examples: * All `GEO REGION` targeting options that belong to sub type
`GEO_REGION_TYPE_COUNTRY` or `GEO_REGION_TYPE_STATE`:
`geoRegionDetails.geoRegionType="GEO_REGION_TYPE_COUNTRY" OR
geoRegionDetails.geoRegionType="GEO_REGION_TYPE_STATE"` * All `CARRIER AND ISP` targeting options
that belong to sub type `CARRIER_AND_ISP_TYPE_CARRIER`:
`carrierAndIspDetails.type="CARRIER_AND_ISP_TYPE_CARRIER"`. The length of this field should be no
more than 500 characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by targeting option properties. Supported syntax: * Filter expressions
* are made up of one or more restrictions. * Restrictions can be combined by `OR` logical
* operators. * A restriction has the form of `{field} {operator} {value}`. * The operator
* must be "=" (equal sign). * Supported fields: - `carrierAndIspDetails.type` -
* `geoRegionDetails.geoRegionType` - `targetingOptionId` Examples: * All `GEO REGION`
* targeting options that belong to sub type `GEO_REGION_TYPE_COUNTRY` or
* `GEO_REGION_TYPE_STATE`: `geoRegionDetails.geoRegionType="GEO_REGION_TYPE_COUNTRY" OR
* geoRegionDetails.geoRegionType="GEO_REGION_TYPE_STATE"` * All `CARRIER AND ISP` targeting
* options that belong to sub type `CARRIER_AND_ISP_TYPE_CARRIER`:
* `carrierAndIspDetails.type="CARRIER_AND_ISP_TYPE_CARRIER"`. The length of this field
* should be no more than 500 characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
        /**
         * Field by which to sort the list. Acceptable values are: * `targetingOptionId` (default)
         * The default sorting order is ascending. To specify descending order for a field, a suffix
         * "desc" should be added to the field name. Example: `targetingOptionId desc`.
         */
        @com.google.api.client.util.Key
        private java.lang.String orderBy;
        /** Field by which to sort the list. Acceptable values are: * `targetingOptionId` (default) The default
       sorting order is ascending. To specify descending order for a field, a suffix "desc" should be
       added to the field name. Example: `targetingOptionId desc`.
         */
        public java.lang.String getOrderBy() {
          return orderBy;
        }
        /**
         * Field by which to sort the list. Acceptable values are: * `targetingOptionId` (default)
         * The default sorting order is ascending. To specify descending order for a field, a suffix
         * "desc" should be added to the field name. Example: `targetingOptionId desc`.
         *
         * <p>Returns this request instance to allow chained parameter setting.</p>
         */
        public List setOrderBy(java.lang.String orderBy) {
          this.orderBy = orderBy;
          return this;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
         * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        @com.google.api.client.util.Key
        private java.lang.Integer pageSize;
        /** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`. Returns
       error code `INVALID_ARGUMENT` if an invalid value is specified.
         */
        public java.lang.Integer getPageSize() {
          return pageSize;
        }
        /**
         * Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
         * Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
         *
         * <p>Returns this request instance to allow chained parameter setting.</p>
         */
        public List setPageSize(java.lang.Integer pageSize) {
          this.pageSize = pageSize;
          return this;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListTargetingOptions`
         * method. If not specified, the first page of results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;
        /** A token identifying a page of results the server should return. Typically, this is the value of
       next_page_token returned from the previous call to `ListTargetingOptions` method. If not specified,
       the first page of results will be returned.
         */
        public java.lang.String getPageToken() {
          return pageToken;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListTargetingOptions`
         * method. If not specified, the first page of results will be returned.
         *
         * <p>Returns this request instance to allow chained parameter setting.</p>
         */
        public List setPageToken(java.lang.String pageToken) {
          this.pageToken = pageToken;
          return this;
        }
        @Override
        public List set(String parameterName, Object value) {
          // Delegate to the base request; the cast preserves the fluent builder type.
          return (List) super.set(parameterName, value);
        }
}
/**
* Searches for targeting options of a given type based on the given search terms.
*
* Create a request for the method "targetingOptions.search".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Search#execute()} method to invoke the remote operation.
*
* @param targetingType Required. The type of targeting options to retrieve. Accepted values are: *
* `TARGETING_TYPE_GEO_REGION`
* @param content the {@link com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsRequest}
* @return the request
*/
public Search search(java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsRequest content) throws java.io.IOException {
Search result = new Search(targetingType, content);
initialize(result);
return result;
}
      public class Search extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsResponse> {
        private static final String REST_PATH = "v1/targetingTypes/{+targetingType}/targetingOptions:search";
        private final java.util.regex.Pattern TARGETING_TYPE_PATTERN =
            java.util.regex.Pattern.compile("^[^/]+$");
        /**
         * Searches for targeting options of a given type based on the given search terms.
         *
         * Create a request for the method "targetingOptions.search".
         *
         * This request holds the parameters needed by the displayvideo server. After setting any
         * optional parameters, call the {@link Search#execute()} method to invoke the remote operation.
         * <p> {@link
         * Search#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
         * be called to initialize this instance immediately after invoking the constructor. </p>
         *
         * @param targetingType Required. The type of targeting options to retrieve. Accepted values are: *
         *        `TARGETING_TYPE_GEO_REGION`
         * @param content the {@link com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsRequest}
         * @since 1.13
         */
        protected Search(java.lang.String targetingType, com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsRequest content) {
          super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.SearchTargetingOptionsResponse.class);
          this.targetingType = com.google.api.client.util.Preconditions.checkNotNull(targetingType, "Required parameter targetingType must be specified.");
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                "Parameter targetingType must conform to the pattern " +
                "^[^/]+$");
          }
        }
        @Override
        public Search set$Xgafv(java.lang.String $Xgafv) {
          return (Search) super.set$Xgafv($Xgafv);
        }
        @Override
        public Search setAccessToken(java.lang.String accessToken) {
          return (Search) super.setAccessToken(accessToken);
        }
        @Override
        public Search setAlt(java.lang.String alt) {
          return (Search) super.setAlt(alt);
        }
        @Override
        public Search setCallback(java.lang.String callback) {
          return (Search) super.setCallback(callback);
        }
        @Override
        public Search setFields(java.lang.String fields) {
          return (Search) super.setFields(fields);
        }
        @Override
        public Search setKey(java.lang.String key) {
          return (Search) super.setKey(key);
        }
        @Override
        public Search setOauthToken(java.lang.String oauthToken) {
          return (Search) super.setOauthToken(oauthToken);
        }
        @Override
        public Search setPrettyPrint(java.lang.Boolean prettyPrint) {
          return (Search) super.setPrettyPrint(prettyPrint);
        }
        @Override
        public Search setQuotaUser(java.lang.String quotaUser) {
          return (Search) super.setQuotaUser(quotaUser);
        }
        @Override
        public Search setUploadType(java.lang.String uploadType) {
          return (Search) super.setUploadType(uploadType);
        }
        @Override
        public Search setUploadProtocol(java.lang.String uploadProtocol) {
          return (Search) super.setUploadProtocol(uploadProtocol);
        }
        /**
         * Required. The type of targeting options to retrieve. Accepted values are: *
         * `TARGETING_TYPE_GEO_REGION`
         */
        @com.google.api.client.util.Key
        private java.lang.String targetingType;
        /** Required. The type of targeting options to retrieve. Accepted values are: *
       `TARGETING_TYPE_GEO_REGION`
         */
        public java.lang.String getTargetingType() {
          return targetingType;
        }
        /**
         * Required. The type of targeting options to retrieve. Accepted values are: *
         * `TARGETING_TYPE_GEO_REGION`
         */
        public Search setTargetingType(java.lang.String targetingType) {
          if (!getSuppressPatternChecks()) {
            com.google.api.client.util.Preconditions.checkArgument(TARGETING_TYPE_PATTERN.matcher(targetingType).matches(),
                "Parameter targetingType must conform to the pattern " +
                "^[^/]+$");
          }
          this.targetingType = targetingType;
          return this;
        }
        @Override
        public Search set(String parameterName, Object value) {
          return (Search) super.set(parameterName, value);
        }
      }
}
}
/**
* An accessor for creating requests from the Users collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code DisplayVideo displayvideo = new DisplayVideo(...);}
* {@code DisplayVideo.Users.List request = displayvideo.users().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Users users() {
return new Users();
}
/**
* The "users" collection of methods.
*/
public class Users {
/**
* Bulk edits user roles for a user. The operation will delete the assigned user roles provided in
* BulkEditAssignedUserRolesRequest.deletedAssignedUserRoles and then assign the user roles provided
* in BulkEditAssignedUserRolesRequest.createdAssignedUserRoles.
*
* Create a request for the method "users.bulkEditAssignedUserRoles".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link BulkEditAssignedUserRoles#execute()} method to invoke the remote
* operation.
*
* @param userId Required. The ID of the user to which the assigned user roles belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesRequest}
* @return the request
*/
public BulkEditAssignedUserRoles bulkEditAssignedUserRoles(java.lang.Long userId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesRequest content) throws java.io.IOException {
BulkEditAssignedUserRoles result = new BulkEditAssignedUserRoles(userId, content);
initialize(result);
return result;
}
public class BulkEditAssignedUserRoles extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesResponse> {
private static final String REST_PATH = "v1/users/{+userId}:bulkEditAssignedUserRoles";
private final java.util.regex.Pattern USER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Bulk edits user roles for a user. The operation will delete the assigned user roles provided in
* BulkEditAssignedUserRolesRequest.deletedAssignedUserRoles and then assign the user roles
* provided in BulkEditAssignedUserRolesRequest.createdAssignedUserRoles.
*
* Create a request for the method "users.bulkEditAssignedUserRoles".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link BulkEditAssignedUserRoles#execute()} method to invoke the
* remote operation. <p> {@link BulkEditAssignedUserRoles#initialize(com.google.api.client.googlea
* pis.services.AbstractGoogleClientRequest)} must be called to initialize this instance
* immediately after invoking the constructor. </p>
*
* @param userId Required. The ID of the user to which the assigned user roles belong.
* @param content the {@link com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesRequest}
* @since 1.13
*/
protected BulkEditAssignedUserRoles(java.lang.Long userId, com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesRequest content) {
super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.BulkEditAssignedUserRolesResponse.class);
this.userId = com.google.api.client.util.Preconditions.checkNotNull(userId, "Required parameter userId must be specified.");
}
@Override
public BulkEditAssignedUserRoles set$Xgafv(java.lang.String $Xgafv) {
return (BulkEditAssignedUserRoles) super.set$Xgafv($Xgafv);
}
@Override
public BulkEditAssignedUserRoles setAccessToken(java.lang.String accessToken) {
return (BulkEditAssignedUserRoles) super.setAccessToken(accessToken);
}
@Override
public BulkEditAssignedUserRoles setAlt(java.lang.String alt) {
return (BulkEditAssignedUserRoles) super.setAlt(alt);
}
@Override
public BulkEditAssignedUserRoles setCallback(java.lang.String callback) {
return (BulkEditAssignedUserRoles) super.setCallback(callback);
}
@Override
public BulkEditAssignedUserRoles setFields(java.lang.String fields) {
return (BulkEditAssignedUserRoles) super.setFields(fields);
}
@Override
public BulkEditAssignedUserRoles setKey(java.lang.String key) {
return (BulkEditAssignedUserRoles) super.setKey(key);
}
@Override
public BulkEditAssignedUserRoles setOauthToken(java.lang.String oauthToken) {
return (BulkEditAssignedUserRoles) super.setOauthToken(oauthToken);
}
@Override
public BulkEditAssignedUserRoles setPrettyPrint(java.lang.Boolean prettyPrint) {
return (BulkEditAssignedUserRoles) super.setPrettyPrint(prettyPrint);
}
@Override
public BulkEditAssignedUserRoles setQuotaUser(java.lang.String quotaUser) {
return (BulkEditAssignedUserRoles) super.setQuotaUser(quotaUser);
}
@Override
public BulkEditAssignedUserRoles setUploadType(java.lang.String uploadType) {
return (BulkEditAssignedUserRoles) super.setUploadType(uploadType);
}
@Override
public BulkEditAssignedUserRoles setUploadProtocol(java.lang.String uploadProtocol) {
return (BulkEditAssignedUserRoles) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the user to which the assigned user roles belong. */
@com.google.api.client.util.Key
private java.lang.Long userId;
/** Required. The ID of the user to which the assigned user roles belong.
*/
public java.lang.Long getUserId() {
return userId;
}
/** Required. The ID of the user to which the assigned user roles belong. */
public BulkEditAssignedUserRoles setUserId(java.lang.Long userId) {
this.userId = userId;
return this;
}
@Override
public BulkEditAssignedUserRoles set(String parameterName, Object value) {
return (BulkEditAssignedUserRoles) super.set(parameterName, value);
}
}
/**
* Creates a new user. Returns the newly created user if successful.
*
* Create a request for the method "users.create".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param content the {@link com.google.api.services.displayvideo.v1.model.User}
* @return the request
*/
public Create create(com.google.api.services.displayvideo.v1.model.User content) throws java.io.IOException {
Create result = new Create(content);
initialize(result);
return result;
}
    public class Create extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.User> {
      private static final String REST_PATH = "v1/users";
      /**
       * Creates a new user. Returns the newly created user if successful.
       *
       * Create a request for the method "users.create".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
       * <p> {@link
       * Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param content the {@link com.google.api.services.displayvideo.v1.model.User}
       * @since 1.13
       */
      protected Create(com.google.api.services.displayvideo.v1.model.User content) {
        super(DisplayVideo.this, "POST", REST_PATH, content, com.google.api.services.displayvideo.v1.model.User.class);
      }
      @Override
      public Create set$Xgafv(java.lang.String $Xgafv) {
        return (Create) super.set$Xgafv($Xgafv);
      }
      @Override
      public Create setAccessToken(java.lang.String accessToken) {
        return (Create) super.setAccessToken(accessToken);
      }
      @Override
      public Create setAlt(java.lang.String alt) {
        return (Create) super.setAlt(alt);
      }
      @Override
      public Create setCallback(java.lang.String callback) {
        return (Create) super.setCallback(callback);
      }
      @Override
      public Create setFields(java.lang.String fields) {
        return (Create) super.setFields(fields);
      }
      @Override
      public Create setKey(java.lang.String key) {
        return (Create) super.setKey(key);
      }
      @Override
      public Create setOauthToken(java.lang.String oauthToken) {
        return (Create) super.setOauthToken(oauthToken);
      }
      @Override
      public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Create) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Create setQuotaUser(java.lang.String quotaUser) {
        return (Create) super.setQuotaUser(quotaUser);
      }
      @Override
      public Create setUploadType(java.lang.String uploadType) {
        return (Create) super.setUploadType(uploadType);
      }
      @Override
      public Create setUploadProtocol(java.lang.String uploadProtocol) {
        return (Create) super.setUploadProtocol(uploadProtocol);
      }
      @Override
      public Create set(String parameterName, Object value) {
        return (Create) super.set(parameterName, value);
      }
    }
/**
* Deletes a user.
*
* Create a request for the method "users.delete".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param userId Required. The ID of the user to delete.
* @return the request
*/
public Delete delete(java.lang.Long userId) throws java.io.IOException {
Delete result = new Delete(userId);
initialize(result);
return result;
}
public class Delete extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.Empty> {
private static final String REST_PATH = "v1/users/{+userId}";
private final java.util.regex.Pattern USER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Deletes a user.
*
* Create a request for the method "users.delete".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param userId Required. The ID of the user to delete.
* @since 1.13
*/
protected Delete(java.lang.Long userId) {
super(DisplayVideo.this, "DELETE", REST_PATH, null, com.google.api.services.displayvideo.v1.model.Empty.class);
this.userId = com.google.api.client.util.Preconditions.checkNotNull(userId, "Required parameter userId must be specified.");
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the user to delete. */
@com.google.api.client.util.Key
private java.lang.Long userId;
/** Required. The ID of the user to delete.
*/
public java.lang.Long getUserId() {
return userId;
}
/** Required. The ID of the user to delete. */
public Delete setUserId(java.lang.Long userId) {
this.userId = userId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets a user.
*
* Create a request for the method "users.get".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param userId Required. The ID of the user to fetch.
* @return the request
*/
public Get get(java.lang.Long userId) throws java.io.IOException {
Get result = new Get(userId);
initialize(result);
return result;
}
public class Get extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.User> {
private static final String REST_PATH = "v1/users/{+userId}";
private final java.util.regex.Pattern USER_ID_PATTERN =
java.util.regex.Pattern.compile("^[^/]+$");
/**
* Gets a user.
*
* Create a request for the method "users.get".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param userId Required. The ID of the user to fetch.
* @since 1.13
*/
protected Get(java.lang.Long userId) {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.User.class);
this.userId = com.google.api.client.util.Preconditions.checkNotNull(userId, "Required parameter userId must be specified.");
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The ID of the user to fetch. */
@com.google.api.client.util.Key
private java.lang.Long userId;
/** Required. The ID of the user to fetch.
*/
public java.lang.Long getUserId() {
return userId;
}
/** Required. The ID of the user to fetch. */
public Get setUserId(java.lang.Long userId) {
this.userId = userId;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists users that are accessible to the current user. If two users have user roles on the same
* partner or advertiser, they can access each other.
*
* Create a request for the method "users.list".
*
* This request holds the parameters needed by the displayvideo server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @return the request
*/
public List list() throws java.io.IOException {
List result = new List();
initialize(result);
return result;
}
public class List extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.ListUsersResponse> {
private static final String REST_PATH = "v1/users";
/**
* Lists users that are accessible to the current user. If two users have user roles on the same
* partner or advertiser, they can access each other.
*
* Create a request for the method "users.list".
*
* This request holds the parameters needed by the the displayvideo server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @since 1.13
*/
protected List() {
super(DisplayVideo.this, "GET", REST_PATH, null, com.google.api.services.displayvideo.v1.model.ListUsersResponse.class);
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Allows filtering by user properties. Supported syntax: * Filter expressions are made up of
* one or more restrictions. * Restrictions can be combined by the logical operator `AND`. * A
* restriction has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS
* (:)` or `EQUALS (=)`. * The operator must be `CONTAINS (:)` for the following fields: -
* `displayName` - `email` * The operator must be `EQUALS (=)` for the following fields: -
* `assignedUserRole.userRole` - `assignedUserRole.partnerId` -
* `assignedUserRole.advertiserId` - `assignedUserRole.entityType`: A synthetic field of
* AssignedUserRole used for filtering. Identifies the type of entity to which the user role
* is assigned. Valid values are `Partner` and `Advertiser`. -
* `assignedUserRole.parentPartnerId`: A synthetic field of AssignedUserRole used for
* filtering. Identifies the parent partner of the entity to which the user role is assigned."
* Examples: * The user with displayName containing `foo`: `displayName:"foo"` * The user with
* email containing `bar`: `email:"bar"` * All users with standard user roles:
* `assignedUserRole.userRole="STANDARD"` * All users with user roles for partner 123:
* `assignedUserRole.partnerId="123"` * All users with user roles for advertiser 123:
* `assignedUserRole.advertiserId="123"` * All users with partner level user roles:
* `entityType="PARTNER"` * All users with user roles for partner 123 and advertisers under
* partner 123: `parentPartnerId="123"` The length of this field should be no more than 500
* characters.
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Allows filtering by user properties. Supported syntax: * Filter expressions are made up of one or
more restrictions. * Restrictions can be combined by the logical operator `AND`. * A restriction
has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS (:)` or `EQUALS
(=)`. * The operator must be `CONTAINS (:)` for the following fields: - `displayName` - `email` *
The operator must be `EQUALS (=)` for the following fields: - `assignedUserRole.userRole` -
`assignedUserRole.partnerId` - `assignedUserRole.advertiserId` - `assignedUserRole.entityType`: A
synthetic field of AssignedUserRole used for filtering. Identifies the type of entity to which the
user role is assigned. Valid values are `Partner` and `Advertiser`. -
`assignedUserRole.parentPartnerId`: A synthetic field of AssignedUserRole used for filtering.
Identifies the parent partner of the entity to which the user role is assigned." Examples: * The
user with displayName containing `foo`: `displayName:"foo"` * The user with email containing `bar`:
`email:"bar"` * All users with standard user roles: `assignedUserRole.userRole="STANDARD"` * All
users with user roles for partner 123: `assignedUserRole.partnerId="123"` * All users with user
roles for advertiser 123: `assignedUserRole.advertiserId="123"` * All users with partner level user
roles: `entityType="PARTNER"` * All users with user roles for partner 123 and advertisers under
partner 123: `parentPartnerId="123"` The length of this field should be no more than 500
characters.
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Allows filtering by user properties. Supported syntax: * Filter expressions are made up of
* one or more restrictions. * Restrictions can be combined by the logical operator `AND`. * A
* restriction has the form of `{field} {operator} {value}`. * The operator must be `CONTAINS
* (:)` or `EQUALS (=)`. * The operator must be `CONTAINS (:)` for the following fields: -
* `displayName` - `email` * The operator must be `EQUALS (=)` for the following fields: -
* `assignedUserRole.userRole` - `assignedUserRole.partnerId` -
* `assignedUserRole.advertiserId` - `assignedUserRole.entityType`: A synthetic field of
* AssignedUserRole used for filtering. Identifies the type of entity to which the user role
* is assigned. Valid values are `Partner` and `Advertiser`. -
* `assignedUserRole.parentPartnerId`: A synthetic field of AssignedUserRole used for
* filtering. Identifies the parent partner of the entity to which the user role is assigned."
* Examples: * The user with displayName containing `foo`: `displayName:"foo"` * The user with
* email containing `bar`: `email:"bar"` * All users with standard user roles:
* `assignedUserRole.userRole="STANDARD"` * All users with user roles for partner 123:
* `assignedUserRole.partnerId="123"` * All users with user roles for advertiser 123:
* `assignedUserRole.advertiserId="123"` * All users with partner level user roles:
* `entityType="PARTNER"` * All users with user roles for partner 123 and advertisers under
* partner 123: `parentPartnerId="123"` The length of this field should be no more than 500
* characters.
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` (default) The
* default sorting order is ascending. To specify descending order for a field, a suffix
* "desc" should be added to the field name. For example, `displayName desc`.
*/
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Field by which to sort the list. Acceptable values are: * `displayName` (default) The default
sorting order is ascending. To specify descending order for a field, a suffix "desc" should be
added to the field name. For example, `displayName desc`.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/**
* Field by which to sort the list. Acceptable values are: * `displayName` (default) The
* default sorting order is ascending. To specify descending order for a field, a suffix
* "desc" should be added to the field name. For example, `displayName desc`.
*/
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Requested page size. Must be between `1` and `100`. If unspecified will default to `100`.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListUsers` method. If not
         * specified, the first page of results will be returned.
         */
        @com.google.api.client.util.Key
        private java.lang.String pageToken;
        /** A token identifying a page of results the server should return. Typically, this is the value of
       next_page_token returned from the previous call to `ListUsers` method. If not specified, the first
       page of results will be returned.
        */
        public java.lang.String getPageToken() {
          // Generated getter for the `pageToken` query parameter.
          return pageToken;
        }
        /**
         * A token identifying a page of results the server should return. Typically, this is the
         * value of next_page_token returned from the previous call to `ListUsers` method. If not
         * specified, the first page of results will be returned.
         */
        public List setPageToken(java.lang.String pageToken) {
          // Store the pagination cursor and return this request for chaining.
          this.pageToken = pageToken;
          return this;
        }
        @Override
        public List set(String parameterName, Object value) {
          // Generic parameter setter; delegates to the base request class and
          // narrows the return type so calls can be chained.
          return (List) super.set(parameterName, value);
        }
}
      /**
       * Updates an existing user. Returns the updated user if successful.
       *
       * Create a request for the method "users.patch".
       *
       * This request holds the parameters needed by the displayvideo server.  After setting any optional
       * parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       *
       * @param userId Output only. The unique ID of the user. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.User}
       * @return the request
       */
      public Patch patch(java.lang.Long userId, com.google.api.services.displayvideo.v1.model.User content) throws java.io.IOException {
        // Build the request and apply client-wide initialization before returning it.
        Patch result = new Patch(userId, content);
        initialize(result);
        return result;
      }
    public class Patch extends DisplayVideoRequest<com.google.api.services.displayvideo.v1.model.User> {
      // URI template for this request; {+userId} is expanded from the userId field.
      private static final String REST_PATH = "v1/users/{+userId}";
      // Validation pattern for the userId path parameter (not referenced in the
      // code visible here; the generator emits it for path-parameter checks).
      private final java.util.regex.Pattern USER_ID_PATTERN =
          java.util.regex.Pattern.compile("^[^/]+$");
      /**
       * Updates an existing user. Returns the updated user if successful.
       *
       * Create a request for the method "users.patch".
       *
       * This request holds the parameters needed by the displayvideo server. After setting any
       * optional parameters, call the {@link Patch#execute()} method to invoke the remote operation.
       * <p> {@link
       * Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
       * be called to initialize this instance immediately after invoking the constructor. </p>
       *
       * @param userId Output only. The unique ID of the user. Assigned by the system.
       * @param content the {@link com.google.api.services.displayvideo.v1.model.User}
       * @since 1.13
       */
      protected Patch(java.lang.Long userId, com.google.api.services.displayvideo.v1.model.User content) {
        super(DisplayVideo.this, "PATCH", REST_PATH, content, com.google.api.services.displayvideo.v1.model.User.class);
        // userId is a required path parameter; fail fast when it is missing.
        this.userId = com.google.api.client.util.Preconditions.checkNotNull(userId, "Required parameter userId must be specified.");
      }
      // Covariant overrides: narrow the return type of the inherited standard
      // query-parameter setters to Patch so calls can be chained fluently.
      @Override
      public Patch set$Xgafv(java.lang.String $Xgafv) {
        return (Patch) super.set$Xgafv($Xgafv);
      }
      @Override
      public Patch setAccessToken(java.lang.String accessToken) {
        return (Patch) super.setAccessToken(accessToken);
      }
      @Override
      public Patch setAlt(java.lang.String alt) {
        return (Patch) super.setAlt(alt);
      }
      @Override
      public Patch setCallback(java.lang.String callback) {
        return (Patch) super.setCallback(callback);
      }
      @Override
      public Patch setFields(java.lang.String fields) {
        return (Patch) super.setFields(fields);
      }
      @Override
      public Patch setKey(java.lang.String key) {
        return (Patch) super.setKey(key);
      }
      @Override
      public Patch setOauthToken(java.lang.String oauthToken) {
        return (Patch) super.setOauthToken(oauthToken);
      }
      @Override
      public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
        return (Patch) super.setPrettyPrint(prettyPrint);
      }
      @Override
      public Patch setQuotaUser(java.lang.String quotaUser) {
        return (Patch) super.setQuotaUser(quotaUser);
      }
      @Override
      public Patch setUploadType(java.lang.String uploadType) {
        return (Patch) super.setUploadType(uploadType);
      }
      @Override
      public Patch setUploadProtocol(java.lang.String uploadProtocol) {
        return (Patch) super.setUploadProtocol(uploadProtocol);
      }
      /** Output only. The unique ID of the user. Assigned by the system. */
      @com.google.api.client.util.Key
      private java.lang.Long userId;
      /** Output only. The unique ID of the user. Assigned by the system.
       */
      public java.lang.Long getUserId() {
        return userId;
      }
      /** Output only. The unique ID of the user. Assigned by the system. */
      public Patch setUserId(java.lang.Long userId) {
        this.userId = userId;
        return this;
      }
      /** Required. The mask to control which fields to update. */
      @com.google.api.client.util.Key
      private String updateMask;
      /** Required. The mask to control which fields to update.
       */
      public String getUpdateMask() {
        return updateMask;
      }
      /** Required. The mask to control which fields to update. */
      public Patch setUpdateMask(String updateMask) {
        this.updateMask = updateMask;
        return this;
      }
      @Override
      public Patch set(String parameterName, Object value) {
        // Generic parameter setter; delegates to the base request class.
        return (Patch) super.set(parameterName, value);
      }
    }
}
  /**
   * Builder for {@link DisplayVideo}.
   *
   * <p>
   * Implementation is not thread-safe.
   * </p>
   *
   * @since 1.3.0
   */
  public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
    private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
      // If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
      // If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
      // Use the regular endpoint for all other cases.
      String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
      // An unset variable behaves like "auto".
      useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
      if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
        return DEFAULT_MTLS_ROOT_URL;
      }
      return DEFAULT_ROOT_URL;
    }
    /**
     * Returns an instance of a new builder.
     *
     * @param transport HTTP transport, which should normally be:
     *        <ul>
     *        <li>Google App Engine:
     *        {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
     *        <li>Android: {@code newCompatibleTransport} from
     *        {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
     *        <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
     *        </li>
     *        </ul>
     * @param jsonFactory JSON factory, which may be:
     *        <ul>
     *        <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
     *        <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
     *        <li>Android Honeycomb or higher:
     *        {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
     *        </ul>
     * @param httpRequestInitializer HTTP request initializer or {@code null} for none
     * @since 1.7
     */
    public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
        com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
      super(
          transport,
          jsonFactory,
          Builder.chooseEndpoint(transport),
          DEFAULT_SERVICE_PATH,
          httpRequestInitializer,
          false);
      setBatchPath(DEFAULT_BATCH_PATH);
    }
    /** Builds a new instance of {@link DisplayVideo}. */
    @Override
    public DisplayVideo build() {
      return new DisplayVideo(this);
    }
    // Covariant overrides: keep the fluent configuration chain typed as Builder.
    @Override
    public Builder setRootUrl(String rootUrl) {
      return (Builder) super.setRootUrl(rootUrl);
    }
    @Override
    public Builder setServicePath(String servicePath) {
      return (Builder) super.setServicePath(servicePath);
    }
    @Override
    public Builder setBatchPath(String batchPath) {
      return (Builder) super.setBatchPath(batchPath);
    }
    @Override
    public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
      return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
    }
    @Override
    public Builder setApplicationName(String applicationName) {
      return (Builder) super.setApplicationName(applicationName);
    }
    @Override
    public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
      return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
    }
    @Override
    public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
      return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
    }
    @Override
    public Builder setSuppressAllChecks(boolean suppressAllChecks) {
      return (Builder) super.setSuppressAllChecks(suppressAllChecks);
    }
    /**
     * Set the {@link DisplayVideoRequestInitializer}.
     *
     * @since 1.12
     */
    public Builder setDisplayVideoRequestInitializer(
        DisplayVideoRequestInitializer displayvideoRequestInitializer) {
      return (Builder) super.setGoogleClientRequestInitializer(displayvideoRequestInitializer);
    }
    @Override
    public Builder setGoogleClientRequestInitializer(
        com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
      return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
    }
  }
}
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
]
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
config/config.go | package config
import (
"fmt"
"io/ioutil"
"log"
"os"
yaml "github.com/go-yaml/yaml"
jwt "github.com/dgrijalva/jwt-go"
"github.com/labstack/echo/middleware"
)
// Environment in which the application runs (e.g. "dev", "prod", "test").
type Environment string

// Environments contains all the possible environments.
// The map value is the YAML config file name loaded for that environment.
var Environments = map[string]Environment{
	"dev":  "dev.yml",
	"prod": "prod.yml",
	"test": "test.yml",
}
// Config contains the necessary application configuration,
// populated from the environment's YAML file by GetConfig.
type Config struct {
	ConnectionString string `yaml:"connection_string"` // database DSN prefix, without the database name
	Database         string `yaml:"database"`          // database name appended by GetCompleteConnectionString
	Debug            bool   `yaml:"debug"`
	Environment      Environment
	JWTKey           string `yaml:"JWT_key"` // secret used as the JWT signing key
	MigrationsPath   string `yaml:"migrations_path"`
}
// GetCompleteConnectionString returns the full connection string for the
// configured database: the connection string with the database name appended.
func (config Config) GetCompleteConnectionString() string {
	return config.ConnectionString + config.Database
}
// Package-level state: the lazily-loaded config cache and the active environment.
var config = Config{}                 // cached parsed config; the zero value means "not loaded yet"
var environment = Environments["dev"] // defaults to dev; changed via SetEnviroment
// SetEnvironment sets the application wide environment.
// It must be called before the first GetConfig call to take effect,
// because GetConfig caches the loaded configuration.
func SetEnvironment(env Environment) {
	environment = env
}

// SetEnviroment sets the application wide environment.
//
// Deprecated: the name is misspelled; use SetEnvironment instead.
// Kept so existing callers continue to compile.
func SetEnviroment(env Environment) {
	SetEnvironment(env)
}
// GetConfig returns the application wide configuration, loading and caching it
// on first use.
//
// The config file is resolved as $APP_PATH/config/<environment file>, falling
// back to $GOPATH/src/github.com/antonve/logger-api when APP_PATH is unset.
// On any load or parse failure the process exits via log.Fatalf.
//
// NOTE(review): the cache is not guarded by a lock; concurrent first calls may
// both load the file. Harmless as long as loading stays idempotent — confirm.
func GetConfig() Config {
	// Return config if we have already loaded it
	if (Config{}) != config {
		return config
	}

	// Get config file path
	// @TODO: change how file path is handled
	appPath := os.Getenv("APP_PATH")
	if appPath == "" {
		appPath = fmt.Sprintf("%s/src/github.com/antonve/logger-api", os.Getenv("GOPATH"))
	}

	// Load config file data; include the underlying error so failures are diagnosable.
	configData, err := ioutil.ReadFile(fmt.Sprintf("%s/config/%s", appPath, environment))
	if err != nil {
		log.Fatalf("Could not load config for environment `%s`: %v", environment, err)
	}

	// Parse config file
	config = Config{
		Environment: environment,
	}
	err = yaml.Unmarshal(configData, &config)
	if err != nil {
		log.Fatalf("Could not parse config for environment `%s`: %v", environment, err)
	}

	return config
}
// GetJWTConfig returns the echo JWT middleware configuration: tokens are
// verified with the configured JWT key and the parsed claims are stored on
// the request context under the "user" key.
func GetJWTConfig(claims jwt.Claims) middleware.JWTConfig {
	return middleware.JWTConfig{
		Claims:     claims,
		SigningKey: []byte(GetConfig().JWTKey),
		ContextKey: "user",
	}
}
| [
"\"APP_PATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"APP_PATH"
]
| [] | ["GOPATH", "APP_PATH"] | go | 2 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/env python3
# Copyright (c) 2012-2019 The Zortcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
# Output file for the generated Qt string definitions.
OUT_CPP="qt/zortcoinstrings.cpp"
# xgettext representation of an empty msgid (the PO header entry), skipped on output.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.

    Each msgid/msgstr is kept as a list of raw quoted-string lines, so
    multi-line entries (continuation lines starting with '"') survive intact.
    """
    result = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        entry = raw.rstrip('\r')
        if entry.startswith('msgid '):
            # A new message begins; flush the previous one if complete.
            if reading_str:
                result.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [entry[6:]]
        elif entry.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [entry[7:]]
        elif entry.startswith('"'):
            # Continuation line for whichever part is being read.
            if reading_id:
                current_id.append(entry)
            if reading_str:
                current_str.append(entry)
    # Flush the trailing message, if any.
    if reading_str:
        result.append((current_id, current_str))
    return result
# Source files to scan are passed on the command line.
files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    # Only triggers when XGETTEXT is explicitly set to an empty string.
    print('Cannot extract strings: xgettext utility is not installed or not configured.', file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.', file=sys.stderr)
    sys.exit(1)
child = Popen([XGETTEXT, '--output=-', '-n', '--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# Fail loudly instead of silently generating an empty strings file.
if child.returncode != 0:
    print('xgettext exited with status %d' % child.returncode, file=sys.stderr)
    sys.exit(1)
messages = parse_po(out.decode('utf-8'))

# Emit the C++ stub; 'with' guarantees the file is closed even on error.
with open(OUT_CPP, 'w', encoding="utf8") as f:
    f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
    f.write('static const char UNUSED *zortcoin_strings[] = {\n')
    f.write('QT_TRANSLATE_NOOP("zortcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
    # Sort for deterministic output across runs.
    messages.sort(key=operator.itemgetter(0))
    for (msgid, msgstr) in messages:
        if msgid != EMPTY:
            f.write('QT_TRANSLATE_NOOP("zortcoin-core", %s),\n' % ('\n'.join(msgid)))
    f.write('};\n')
| []
| []
| [
"COPYRIGHT_HOLDERS",
"XGETTEXT"
]
| [] | ["COPYRIGHT_HOLDERS", "XGETTEXT"] | python | 2 | 0 | |
src/syscall/syscall_unix_test.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package syscall_test
import (
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"syscall"
"testing"
"time"
)
// Tests that below functions, structures and constants are consistent
// on all Unix-like systems.
func _() {
	// This blank-named function is never executed; the declarations below
	// only need to compile, which checks cross-platform API consistency.

	// program scheduling priority functions and constants
	var (
		_ func(int, int, int) error   = syscall.Setpriority
		_ func(int, int) (int, error) = syscall.Getpriority
	)
	const (
		_ int = syscall.PRIO_USER
		_ int = syscall.PRIO_PROCESS
		_ int = syscall.PRIO_PGRP
	)

	// termios constants
	const (
		_ int = syscall.TCIFLUSH
		_ int = syscall.TCIOFLUSH
		_ int = syscall.TCOFLUSH
	)

	// fcntl file locking structure and constants
	var (
		_ = syscall.Flock_t{
			Type:   int16(0),
			Whence: int16(0),
			Start:  int64(0),
			Len:    int64(0),
			Pid:    int32(0),
		}
	)
	const (
		_ = syscall.F_GETLK
		_ = syscall.F_SETLK
		_ = syscall.F_SETLKW
	)
}
// TestFcntlFlock tests whether the file locking structure matches
// the calling convention of each kernel.
// On some Linux systems, glibc uses another set of values for the
// commands and translates them to the correct value that the kernel
// expects just before the actual fcntl syscall. As Go uses raw
// syscalls directly, it must use the real value, not the glibc value.
// Thus this test also verifies that the Flock_t structure can be
// roundtripped with F_SETLK and F_GETLK.
func TestFcntlFlock(t *testing.T) {
	if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
		t.Skip("skipping; no child processes allowed on iOS")
	}
	flock := syscall.Flock_t{
		Type:  syscall.F_WRLCK,
		Start: 31415, Len: 271828, Whence: 1,
	}
	if os.Getenv("GO_WANT_HELPER_PROCESS") == "" {
		// parent: take the lock, then re-run this binary as a helper child
		// (GO_WANT_HELPER_PROCESS=1) that probes the lock through fd 3.
		tempDir, err := ioutil.TempDir("", "TestFcntlFlock")
		if err != nil {
			t.Fatalf("Failed to create temp dir: %v", err)
		}
		name := filepath.Join(tempDir, "TestFcntlFlock")
		fd, err := syscall.Open(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0)
		if err != nil {
			t.Fatalf("Open failed: %v", err)
		}
		defer os.RemoveAll(tempDir)
		defer syscall.Close(fd)
		if err := syscall.Ftruncate(fd, 1<<20); err != nil {
			t.Fatalf("Ftruncate(1<<20) failed: %v", err)
		}
		if err := syscall.FcntlFlock(uintptr(fd), syscall.F_SETLK, &flock); err != nil {
			t.Fatalf("FcntlFlock(F_SETLK) failed: %v", err)
		}
		cmd := exec.Command(os.Args[0], "-test.run=^TestFcntlFlock$")
		cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
		// The locked file is handed to the child as fd 3 (after stdin/out/err).
		cmd.ExtraFiles = []*os.File{os.NewFile(uintptr(fd), name)}
		out, err := cmd.CombinedOutput()
		if len(out) > 0 || err != nil {
			t.Fatalf("child process: %q, %v", out, err)
		}
	} else {
		// child
		got := flock
		// make sure the child lock is conflicting with the parent lock
		got.Start--
		got.Len++
		if err := syscall.FcntlFlock(3, syscall.F_GETLK, &got); err != nil {
			t.Fatalf("FcntlFlock(F_GETLK) failed: %v", err)
		}
		flock.Pid = int32(syscall.Getppid())
		// Linux kernel always set Whence to 0
		flock.Whence = 0
		if got.Type == flock.Type && got.Start == flock.Start && got.Len == flock.Len && got.Pid == flock.Pid && got.Whence == flock.Whence {
			os.Exit(0)
		}
		t.Fatalf("FcntlFlock got %v, want %v", got, flock)
	}
}
// TestPassFD tests passing a file descriptor over a Unix socket.
//
// This test involved both a parent and child process. The parent
// process is invoked as a normal test, with "go test", which then
// runs the child process by running the current test binary with args
// "-test.run=^TestPassFD$" and an environment variable used to signal
// that the test should become the child process instead.
func TestPassFD(t *testing.T) {
	testenv.MustHaveExec(t)

	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
		passFDChild()
		return
	}

	if runtime.GOOS == "aix" {
		// Unix network isn't properly working on AIX 7.2 with Technical Level < 2
		out, err := exec.Command("oslevel", "-s").Output()
		if err != nil {
			t.Skipf("skipping on AIX because oslevel -s failed: %v", err)
		}
		if len(out) < len("7200-XX-ZZ-YYMM") { // AIX 7.2, Tech Level XX, Service Pack ZZ, date YYMM
			t.Skip("skipping on AIX because oslevel -s hasn't the right length")
		}
		aixVer := string(out[:4])
		tl, err := strconv.Atoi(string(out[5:7]))
		if err != nil {
			t.Skipf("skipping on AIX because oslevel -s output cannot be parsed: %v", err)
		}
		if aixVer < "7200" || (aixVer == "7200" && tl < 2) {
			t.Skip("skipped on AIX versions previous to 7.2 TL 2")
		}
	}

	tempDir, err := ioutil.TempDir("", "TestPassFD")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tempDir)

	// fds[0] goes to the child (it writes), fds[1] stays in the parent (it reads).
	fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM, 0)
	if err != nil {
		t.Fatalf("Socketpair: %v", err)
	}
	defer syscall.Close(fds[0])
	defer syscall.Close(fds[1])
	writeFile := os.NewFile(uintptr(fds[0]), "child-writes")
	readFile := os.NewFile(uintptr(fds[1]), "parent-reads")
	defer writeFile.Close()
	defer readFile.Close()

	cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir)
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	cmd.ExtraFiles = []*os.File{writeFile}

	out, err := cmd.CombinedOutput()
	if len(out) > 0 || err != nil {
		t.Fatalf("child process: %q, %v", out, err)
	}

	c, err := net.FileConn(readFile)
	if err != nil {
		t.Fatalf("FileConn: %v", err)
	}
	defer c.Close()

	uc, ok := c.(*net.UnixConn)
	if !ok {
		t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c)
	}

	buf := make([]byte, 32) // expect 1 byte
	oob := make([]byte, 32) // expect 24 bytes
	// Abort the read if the child never sends anything.
	closeUnix := time.AfterFunc(5*time.Second, func() {
		t.Logf("timeout reading from unix socket")
		uc.Close()
	})
	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
	if err != nil {
		t.Fatalf("ReadMsgUnix: %v", err)
	}
	closeUnix.Stop()

	// The descriptor arrives in the out-of-band (control message) data.
	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		t.Fatalf("ParseSocketControlMessage: %v", err)
	}
	if len(scms) != 1 {
		t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms)
	}
	scm := scms[0]
	gotFds, err := syscall.ParseUnixRights(&scm)
	if err != nil {
		t.Fatalf("syscall.ParseUnixRights: %v", err)
	}
	if len(gotFds) != 1 {
		t.Fatalf("wanted 1 fd; got %#v", gotFds)
	}

	f := os.NewFile(uintptr(gotFds[0]), "fd-from-child")
	defer f.Close()

	got, err := ioutil.ReadAll(f)
	want := "Hello from child process!\n"
	if string(got) != want {
		t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want)
	}
}
// passFDChild is the child process used by TestPassFD.
// It creates a temp file and sends its descriptor back to the parent
// over the inherited Unix socket via an SCM_RIGHTS control message.
func passFDChild() {
	defer os.Exit(0)

	// Look for our fd. It should be fd 3, but we work around an fd leak
	// bug here (https://golang.org/issue/2603) to let it be elsewhere.
	var uc *net.UnixConn
	for fd := uintptr(3); fd <= 10; fd++ {
		f := os.NewFile(fd, "unix-conn")
		var ok bool
		netc, _ := net.FileConn(f)
		uc, ok = netc.(*net.UnixConn)
		if ok {
			break
		}
	}
	if uc == nil {
		fmt.Println("failed to find unix fd")
		return
	}

	// Make a file f to send to our parent process on uc.
	// We make it in tempDir, which our parent will clean up.
	flag.Parse()
	tempDir := flag.Arg(0)
	f, err := ioutil.TempFile(tempDir, "")
	if err != nil {
		fmt.Printf("TempFile: %v", err)
		return
	}

	f.Write([]byte("Hello from child process!\n"))
	f.Seek(0, io.SeekStart)

	rights := syscall.UnixRights(int(f.Fd()))
	// At least one byte of regular data must accompany the control message.
	dummyByte := []byte("x")
	n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil)
	if err != nil {
		fmt.Printf("WriteMsgUnix: %v", err)
		return
	}
	if n != 1 || oobn != len(rights) {
		fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights))
		return
	}
}
// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage,
// and ParseUnixRights are able to successfully round-trip lists of file descriptors.
func TestUnixRightsRoundtrip(t *testing.T) {
	testCases := [...][][]int{
		{{42}},
		{{1, 2}},
		{{3, 4, 5}},
		{{}},
		{{1, 2}, {3, 4, 5}, {}, {7}},
	}
	for _, testCase := range testCases {
		b := []byte{}
		var n int
		for _, fds := range testCase {
			// Last assignment to n wins
			n = len(b) + syscall.CmsgLen(4*len(fds))
			b = append(b, syscall.UnixRights(fds...)...)
		}
		// Truncate b
		b = b[:n]

		scms, err := syscall.ParseSocketControlMessage(b)
		if err != nil {
			t.Fatalf("ParseSocketControlMessage: %v", err)
		}
		if len(scms) != len(testCase) {
			t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms)
		}
		// Each parsed control message must yield exactly the fds we encoded.
		for i, scm := range scms {
			gotFds, err := syscall.ParseUnixRights(&scm)
			if err != nil {
				t.Fatalf("ParseUnixRights: %v", err)
			}
			wantFds := testCase[i]
			if len(gotFds) != len(wantFds) {
				t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds)
			}
			for j, fd := range gotFds {
				if fd != wantFds[j] {
					t.Fatalf("expected fd %v, got %v", wantFds[j], fd)
				}
			}
		}
	}
}
// TestRlimit exercises Getrlimit/Setrlimit on RLIMIT_NOFILE: it lowers the
// soft limit by one, verifies the change took, and restores the original.
func TestRlimit(t *testing.T) {
	var rlimit, zero syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
	if err != nil {
		t.Fatalf("Getrlimit: save failed: %v", err)
	}
	if zero == rlimit {
		t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit)
	}
	set := rlimit
	set.Cur = set.Max - 1
	if runtime.GOOS == "darwin" && set.Cur > 10240 {
		// The max file limit is 10240, even though
		// the max returned by Getrlimit is 1<<63-1.
		// This is OPEN_MAX in sys/syslimits.h.
		set.Cur = 10240
	}
	err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &set)
	if err != nil {
		t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
	}
	var get syscall.Rlimit
	err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &get)
	if err != nil {
		t.Fatalf("Getrlimit: get failed: %v", err)
	}
	// Recompute the expected value (with the same darwin clamp) and compare.
	set = rlimit
	set.Cur = set.Max - 1
	if runtime.GOOS == "darwin" && set.Cur > 10240 {
		set.Cur = 10240
	}
	if set != get {
		t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get)
	}
	// Restore the saved limit so later tests see the original environment.
	err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit)
	if err != nil {
		t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err)
	}
}
// TestSeekFailure verifies that Seek on an invalid file descriptor reports an
// error whose message is non-empty (formatting that error used to crash on Linux).
func TestSeekFailure(t *testing.T) {
	_, err := syscall.Seek(-1, 0, io.SeekStart)
	if err == nil {
		t.Fatalf("Seek(-1, 0, 0) did not fail")
	}
	msg := err.Error() // used to crash on Linux
	t.Logf("Seek: %v", msg)
	if len(msg) == 0 {
		t.Fatalf("Seek(-1, 0, 0) return error with empty message")
	}
}
// TestSetsockoptString checks that SetsockoptString tolerates an empty value.
func TestSetsockoptString(t *testing.T) {
	// should not panic on empty string, see issue #31277
	if err := syscall.SetsockoptString(-1, 0, 0, ""); err == nil {
		t.Fatalf("SetsockoptString: did not fail")
	}
}
| [
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
exec/jvm/sandbox.go | /*
* Copyright 1999-2020 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jvm
import (
"context"
"fmt"
"os"
osuser "os/user"
"path"
"strconv"
"strings"
"time"
"github.com/chaosblade-io/chaosblade-spec-go/channel"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/shirou/gopsutil/process"
"github.com/sirupsen/logrus"
)
// cl runs shell commands on the local host; used for every external
// invocation (java, ps, grep, date) when attaching the sandbox to a JVM.
var cl = channel.NewLocalChannel()

// DefaultNamespace is the sandbox namespace used for all chaosblade requests.
const DefaultNamespace = "default"
// Attach attaches the sandbox agent to the JVM process identified by pid,
// activates the chaosblade module on the given port and verifies its status.
// It returns the final response and the OS username owning the JVM process
// (needed later to locate that user's sandbox token file).
func Attach(port string, javaHome string, pid string) (*spec.Response, string) {
	// refresh
	response, username := attach(pid, port, context.TODO(), javaHome)
	if !response.Success {
		return response, username
	}
	// fixed delay — presumably gives the agent time to start its HTTP
	// server before activation; TODO confirm
	time.Sleep(5 * time.Second)
	// active
	response = active(port)
	if !response.Success {
		return response, username
	}
	// check
	return check(port), username
}
// check queries the chaosblade module status endpoint, equivalent to:
// curl -s http://localhost:$2/sandbox/default/module/http/chaosblade/status 2>&1
// Success is reported only for HTTP 200; otherwise the curl error or the
// non-200 code/result is returned as a failure.
func check(port string) *spec.Response {
	url := getSandboxUrl(port, "chaosblade/status", "")
	result, err, code := util.Curl(url)
	if code == 200 {
		return spec.ReturnSuccess(result)
	}
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
	}
	return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
		fmt.Sprintf("response code is %d, result: %s", code, result))
}
// active activates the chaosblade sandbox module, equivalent to:
// bin/sandbox.sh -p $pid -P $2 -a chaosblade 2>&1
func active(port string) *spec.Response {
	url := getSandboxUrl(port, "sandbox-module-mgr/active", "&ids=chaosblade")
	result, err, code := util.Curl(url)
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
	}
	if code != 200 {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
			fmt.Sprintf("active module response code: %d, result: %s", code, result))
	}
	return spec.ReturnSuccess("success")
}
// attach loads the sandbox java agent into the application process pid.
// It resolves the java binary, builds the sandbox-core.jar attach command,
// runs it as the process owner (via sudo when the current user differs),
// and finally greps the owner's sandbox token file to confirm the agent
// recorded a successful attach. Returns the response and the owner username.
func attach(pid, port string, ctx context.Context, javaHome string) (*spec.Response, string) {
	username, err := getUsername(pid)
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.StatusError],
			fmt.Sprintf("get username failed by %s pid, %v", pid, err)), ""
	}
	javaBin, javaHome := getJavaBinAndJavaHome(javaHome, ctx, pid)
	toolsJar := getToolJar(javaHome)
	logrus.Debugf("javaBin: %s, javaHome: %s, toolsJar: %s", javaBin, javaHome, toolsJar)
	token, err := getSandboxToken(ctx)
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.ServerError],
			fmt.Sprintf("create sandbox token failed, %v", err)), username
	}
	javaArgs := getAttachJvmOpts(toolsJar, token, port, pid)
	currUser, err := osuser.Current()
	if err != nil {
		logrus.Warnf("get current user info failed, %v", err)
	}
	var response *spec.Response
	// The attach must run as the JVM's owner, otherwise the JVM rejects it;
	// fall back to sudo when we are a different (or unknown) user.
	if currUser != nil && (currUser.Username == username) {
		response = cl.Run(ctx, javaBin, javaArgs)
	} else {
		if currUser != nil {
			logrus.Debugf("current user name is %s, not equal %s, so use sudo command to execute",
				currUser.Username, username)
		}
		response = cl.Run(ctx, "sudo", fmt.Sprintf("-u %s %s %s", username, javaBin, javaArgs))
	}
	if !response.Success {
		return response, username
	}
	response = cl.Run(ctx, "grep", fmt.Sprintf(`%s %s | grep %s | tail -1 | awk -F ";" '{print $3";"$4}'`,
		token, getSandboxTokenFile(username), DefaultNamespace))
	// if attach successfully, the sandbox-agent.jar will write token to local file
	if !response.Success {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
			fmt.Sprintf("attach JVM %s failed, loss response; %s", pid, response.Err)), username
	}
	return response, username
}
// getAttachJvmOpts assembles the command-line arguments used to run
// sandbox-core.jar, which attaches sandbox-agent.jar to the JVM given by pid.
// token authenticates the attach and port is where the agent's HTTP server
// will listen on 127.0.0.1.
func getAttachJvmOpts(toolsJar string, token string, port string, pid string) string {
	// Small fixed heap; tools.jar is appended to the boot classpath (-Xbootclasspath/a).
	jvmOpts := fmt.Sprintf("-Xms128M -Xmx128M -Xnoclassgc -ea -Xbootclasspath/a:%s", toolsJar)
	sandboxHome := path.Join(util.GetLibHome(), "sandbox")
	sandboxLibPath := path.Join(sandboxHome, "lib")
	// Key=value pairs passed through to sandbox-agent.jar, ';'-separated.
	sandboxAttachArgs := fmt.Sprintf("home=%s;token=%s;server.ip=%s;server.port=%s;namespace=%s",
		sandboxHome, token, "127.0.0.1", port, DefaultNamespace)
	javaArgs := fmt.Sprintf(`%s -jar %s/sandbox-core.jar %s "%s/sandbox-agent.jar" "%s"`,
		jvmOpts, sandboxLibPath, pid, sandboxLibPath, sandboxAttachArgs)
	return javaArgs
}
// getSandboxToken derives a fresh attach token from the current timestamp:
// the cksum of `date` output with spaces stripped.
func getSandboxToken(ctx context.Context) (string, error) {
	// create sandbox token
	resp := cl.Run(ctx, "date", "| head | cksum | sed 's/ //g'")
	if !resp.Success {
		return "", fmt.Errorf(resp.Err)
	}
	return strings.TrimSpace(resp.Result.(string)), nil
}
// getToolJar returns the tools.jar path: prefers $javaHome/lib/tools.jar and
// falls back to the copy bundled with chaosblade (logging a warning).
func getToolJar(javaHome string) string {
	toolsJar := path.Join(util.GetBinPath(), "tools.jar")
	originalJar := path.Join(javaHome, "lib/tools.jar")
	if util.IsExist(originalJar) {
		toolsJar = originalJar
	} else {
		logrus.Warningf("using chaosblade default tools.jar, %s", toolsJar)
	}
	return toolsJar
}
// getUsername returns the OS username owning the process identified by the
// decimal pid string.
func getUsername(pid string) (string, error) {
	numericPid, err := strconv.Atoi(pid)
	if err != nil {
		return "", err
	}
	proc, err := process.NewProcess(int32(numericPid))
	if err != nil {
		return "", err
	}
	return proc.Username()
}
// getJavaBinAndJavaHome resolves the java executable and the java home for
// the target process. Resolution order:
//  1. the explicit javaHome argument,
//  2. the JAVA_HOME environment variable,
//  3. the java binary path read from the target process's ps entry.
//
// In the last case javaHome is derived by stripping the "/bin/java" suffix;
// it stays empty when the discovered binary path has no such suffix.
func getJavaBinAndJavaHome(javaHome string, ctx context.Context, pid string) (string, string) {
	// Named suffix instead of the former magic length 9 (= len("/bin/java")).
	const javaBinSuffix = "/bin/java"
	javaBin := "java"
	if javaHome != "" {
		return path.Join(javaHome, "bin/java"), javaHome
	}
	if javaHome = strings.TrimSpace(os.Getenv("JAVA_HOME")); javaHome != "" {
		return path.Join(javaHome, "bin/java"), javaHome
	}
	// Fall back to the command path of the running JVM process.
	psArgs := cl.GetPsArgs()
	response := cl.Run(ctx, "ps", fmt.Sprintf(`%s | grep -w %s | grep java | grep -v grep | awk '{print $4}'`,
		psArgs, pid))
	if response.Success {
		javaBin = strings.TrimSpace(response.Result.(string))
	}
	if strings.HasSuffix(javaBin, javaBinSuffix) {
		javaHome = strings.TrimSuffix(javaBin, javaBinSuffix)
	}
	return javaBin, javaHome
}
// Detach shuts down the sandbox agent listening on the given port.
func Detach(port string) *spec.Response {
	return shutdown(port)
}
// CheckPortFromSandboxToken will read last line and curl the port for testing connectivity.
// It returns the port recorded in the user's sandbox token file, or an error
// when no usable port is recorded or the sandbox does not answer on it.
func CheckPortFromSandboxToken(username string) (port string, err error) {
	port, err = getPortFromSandboxToken(username)
	if err != nil {
		return port, err
	}
	// Probe the version endpoint to make sure the agent is actually listening.
	versionUrl := getSandboxUrl(port, "sandbox-info/version", "")
	_, err, _ = util.Curl(versionUrl)
	if err != nil {
		return "", err
	}
	return port, nil
}
// getPortFromSandboxToken extracts the sandbox port from the last "default"
// namespace record in the user's sandbox token file and validates that the
// extracted field is numeric.
func getPortFromSandboxToken(username string) (port string, err error) {
	response := cl.Run(context.TODO(), "grep",
		fmt.Sprintf(`%s %s | tail -1 | awk -F ";" '{print $4}'`,
			DefaultNamespace, getSandboxTokenFile(username)))
	if !response.Success {
		return "", fmt.Errorf(response.Err)
	}
	if response.Result == nil {
		return "", fmt.Errorf("get empty from sandbox token file")
	}
	port = strings.TrimSpace(response.Result.(string))
	if port == "" {
		return "", fmt.Errorf("read empty from sandbox token file")
	}
	// Ensure the extracted field really is a port number.
	_, err = strconv.Atoi(port)
	if err != nil {
		return "", fmt.Errorf("can not find port from sandbox token file, %v", err)
	}
	return port, nil
}
// shutdown stops the sandbox agent via its control endpoint, equivalent to:
// sudo -u $user -H bash bin/sandbox.sh -p $pid -S 2>&1
func shutdown(port string) *spec.Response {
	url := getSandboxUrl(port, "sandbox-control/shutdown", "")
	result, err, code := util.Curl(url)
	if err != nil {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError], err.Error())
	}
	if code != 200 {
		return spec.ReturnFail(spec.Code[spec.SandboxInvokeError],
			fmt.Sprintf("shutdown module response code: %d, result: %s", code, result))
	}
	return spec.ReturnSuccess("success")
}
func getSandboxUrl(port, uri, param string) string {
// "sandbox-module-mgr/reset"
return fmt.Sprintf("http://127.0.0.1:%s/sandbox/%s/module/http/%s?1=1%s",
port, DefaultNamespace, uri, param)
}
// getSandboxTokenFile returns the path of the ".sandbox.token" file in the
// given user's home directory; the sandbox agent records attach results there.
func getSandboxTokenFile(username string) string {
	userHome := util.GetSpecifyingUserHome(username)
	return path.Join(userHome, ".sandbox.token")
}
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | go | 1 | 0 | |
app/admin/main/tag/service/channel_test.go | package service
// import (
// "context"
// "testing"
// . "github.com/smartystreets/goconvey/convey"
// )
// func TestServiceChannel(t *testing.T) {
// var (
// id = int64(22)
// operator = "unit test"
// order = "ctime"
// sort = "DESC"
// tp = int32(2)
// ps int32 = 1
// pn int32 = 20
// )
// Convey("ChanneList", func() {
// testSvc.ChanneList(context.TODO(), []int64{id}, operator, order, sort, "", "", tp, -1, -1, pn, ps)
// })
// Convey("ChannelState", func() {
// testSvc.ChannelState(context.TODO(), id, 0)
// })
// Convey("ChannelCategory", func() {
// testSvc.ChannelCategory(context.TODO())
// })
// Convey("CategoryAdd", func() {
// testSvc.CategoryAdd(context.TODO(), "unit")
// })
// Convey("DeleteCategory", func() {
// testSvc.DeleteCategory(context.TODO(), id)
// })
// Convey("StateCategory", func() {
// testSvc.DeleteCategory(context.TODO(), id)
// })
// Convey("DeletALlChanRule", func() {
// testSvc.DeletALlChanRule(context.TODO(), id)
// })
// }
| []
| []
| []
| [] | [] | go | null | null | null |
testing_school/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks.

    Entry point for Django's command-line interface: points Django at this
    project's settings module and hands ``sys.argv`` to the management
    command dispatcher.
    """
    # Only set the settings module if the caller has not overridden it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testing_school.settings')
    try:
        # Imported lazily so the ImportError below can explain a missing
        # install or an inactive virtual environment.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Run the CLI when executed as a script (e.g. `python manage.py runserver`).
if __name__ == '__main__':
    main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ape210k_t5_byt5.py | import pandas as pd
from tqdm import tqdm
from sympy import sympify
from hanspell import spell_checker
import ast
import json
import re
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
import torch
from transformers import TrainingArguments, Trainer
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import EarlyStoppingCallback
#from transformers import T5ForConditionalGeneration, AutoTokenizer
#from transformers import MT5ForConditionalGeneration, MT5TokenizerFast
from transformers import T5ForConditionalGeneration, AutoTokenizer
# from transformers import MT5Model, T5Tokenizer
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import wandb
wandb.init(project="hug_ape210k", name="byt5")
#tags=["baseline", "high-lr"],
#group="bert"
train_df = pd.read_csv("train.tsv", sep="\t").astype(str)
eval_df = pd.read_csv("eval.tsv", sep="\t").astype(str)
# Create torch dataset
class Dataset(torch.utils.data.Dataset):
    """Thin torch Dataset over tokenizer output.

    Wraps a tokenizer encoding dict (lists of token ids per field) and an
    optional list of label sequences, converting entries to tensors lazily
    in ``__getitem__``.
    """

    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # Convert each encoding field for this example to a tensor.
        sample = {}
        for name, values in self.encodings.items():
            sample[name] = torch.tensor(values[idx])
        # Attach the target ids under the key the HF Trainer expects.
        if self.labels is not None:
            sample["labels"] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        # One example per row of input_ids.
        return len(self.encodings["input_ids"])
# prefix = '~/Git/T5_test/data/binary_classification/'
# Read data
# data = pd.read_csv(prefix + "train.csv", header=None)
# data = data.rename(columns={0:'sentiment', 1:'review'})
# #data = data.iloc[:2000]
# Define pretrained tokenizer and model
# model_name = "bert-base-uncased"
# tokenizer = BertTokenizer.from_pretrained(model_name)
# model = BertForSequenceClassification.from_pretrained(model_name, num_labels=2)
#tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
#model = MT5ForConditionalGeneration.from_pretrained('google/mt5-large')
# model = MT5Model.from_pretrained('google/mt5-small') #, num_labels=2)
model = T5ForConditionalGeneration.from_pretrained('google/byt5-large')
#tokenizer = MT5TokenizerFast.from_pretrained('google/mt5-large')
tokenizer = AutoTokenizer.from_pretrained('google/byt5-large')
X_train = train_df.input_text.tolist()
y_train = train_df.target_text.tolist()
X_val = eval_df.input_text.tolist()
y_val = eval_df.target_text.tolist()
# ----- 1. Preprocess data -----#
# Preprocess data
# X = list(data["review"])
# y = list(data["sentiment"])
# X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1)
X_train_tokenized = tokenizer(X_train, padding=True, truncation=True, max_length=512)
X_val_tokenized = tokenizer(X_val, padding=True, truncation=True, max_length=512)
# y_train = tokenizer([str(x) for x in y_train]).input_ids
# y_val = tokenizer([str(x) for x in y_val]).input_ids
y_train_tokenized = tokenizer(y_train, padding=True, truncation=True, max_length=128).input_ids
y_val_tokenized = tokenizer(y_val, padding=True, truncation=True, max_length=128).input_ids
train_dataset = Dataset(X_train_tokenized, y_train_tokenized)
val_dataset = Dataset(X_val_tokenized, y_val_tokenized)
# ----- 2. Fine-tune pretrained model -----#
# Define Trainer parameters
# def compute_metrics(p):
# pred, labels = p
# pred = np.argmax(pred, axis=1)
# accuracy = accuracy_score(y_true=labels, y_pred=pred)
# recall = recall_score(y_true=labels, y_pred=pred)
# precision = precision_score(y_true=labels, y_pred=pred)
# f1 = f1_score(y_true=labels, y_pred=pred)
# return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
# Define Trainer
args = TrainingArguments(
output_dir="output_byt5",
overwrite_output_dir="true",
#evaluation_strategy="epoch",
evaluation_strategy="steps",
eval_steps=20000,
#save_strategy="epoch",
save_strategy="steps",
save_steps=20000,
save_total_limit=3,
dataloader_num_workers=4,
learning_rate=0.0001,
per_device_train_batch_size=2,
per_device_eval_batch_size=2,
gradient_accumulation_steps=40,
num_train_epochs=10,
seed=0,
load_best_model_at_end=True,
report_to="wandb",
)
# args_dict = {
# "output_dir": './models/tpu',
# "per_gpu_eval_batch_size": 8,
# "num_cores": 8,
# 'training_script': 'train_t5_squad.py',
# "model_name_or_path": 't5-base',
# "max_len": 512 ,
# "target_max_len": 16,
# "overwrite_output_dir": True,
# "per_gpu_train_batch_size": 8,
# "gradient_accumulation_steps": 4,
# "learning_rate": 1e-4,
# "tpu_num_cores": 8,
# "num_train_epochs": 4,
# "do_train": True
# }
trainer = Trainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
#compute_metrics=compute_metrics,
#prediction_loss_only=True
#callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
)
# Train pre-trained model
trainer.train()
# ----- 3. Predict -----#
# # Load test data
#test_data = pd.read_csv(prefix + "test.csv", header=None)
#test_data = test_data.rename(columns={0:'sentiment', 1:'review'})
test_df = pd.read_csv("valid.tsv", sep="\t").astype(str)
# X_test = list(test_data["review"])[:100]
# y_test = list(test_data["sentiment"])[:100]
# X_test_tokenized = tokenizer(X_test, padding=True, truncation=True, max_length=512)
# y_test_tokenized = tokenizer([str(x) for x in y_test]).input_ids
#
X_test = test_df.input_text.tolist()
y_test = test_df.target_text.tolist()
# X_test_tokenized = tokenizer(X_test, padding=True, truncation=True, max_length=512)
# y_test_tokenized = tokenizer(y_test, padding=True, truncation=True, max_length=128).input_ids
# # # Create torch dataset
#test_dataset = Dataset(X_test_tokenized, y_test_tokenized)
# #
# # Load trained model
#model_path = "output/checkpoint-2438"
# #model = BertForSequenceClassification.from_pretrained(model_path, num_labels=2)
#model_path = "outputs/best_model/"
model_path = "output_byt5"
# Reload the fine-tuned checkpoint for inference. The checkpoint was
# trained above with T5ForConditionalGeneration (ByT5 is a T5 variant),
# so it must be loaded with the same class. The original call used
# MT5ForConditionalGeneration, whose import is commented out at the top
# of this script, which raises a NameError at this point.
model = T5ForConditionalGeneration.from_pretrained(model_path)
model = model.to("cuda:0")
# # # Define test trainer
# args = TrainingArguments(
# output_dir="output",
# #overwrite_output_dir="true",
# #evaluation_strategy="steps",
# #eval_steps=100,
# #save_strategy="epoch",
# #save_steps=100,
# #per_device_train_batch_size=32,
# #gradient_accumulation_steps=2,
# per_device_eval_batch_size=4,
# #per_device_test_batch_size=4,
# num_train_epochs=1,
# seed=0,
# #load_best_model_at_end=True,
# )
# test_trainer = Trainer(model, args=args)
# # Make prediction
# raw_pred, _, _ = test_trainer.predict(test_dataset)
# y_pred.attach(torch.from_numpy(raw_pred[0]).max(dim=2)[1])
#X_test_solve = ["Solve: " + x for x in X_test]
#X_test_tokenized_ids = tokenizer(X_test_solve, padding=True, truncation=True, max_length=512, return_tensors='pt').input_ids
X_test_tokenized_ids = tokenizer(X_test, padding=True, truncation=True, max_length=512, return_tensors='pt').input_ids
decoded_list = []
for i in tqdm(range(0,len(X_test_tokenized_ids),16)):
y_pred = model.generate(X_test_tokenized_ids[i:i+16].to("cuda:0"))
decoded_list.append(tokenizer.batch_decode(y_pred))
from tqdm import tqdm
import re
import itertools
def normalizetext(text):
    """Normalize a model-emitted expression into sympy-parsable form.

    Strips a leading ``x=``, rewrites ratios ``a:b`` as ``(a/b)``, mixed
    numbers ``a(b/c)`` as ``(a*c+b)/(c)`` and percentages ``p%`` as
    ``(p/100)``.
    """
    # Percentage to Fraction
    text = text.replace('x=', '')
    # Ratio a:b -> fraction (a/b).
    text = re.sub('([0-9]+)[:]([0-9]+)', '(\g<1>/\g<2>)', text)
    # Mixed number a(b/c) -> improper fraction (a*c+b)/(c).
    mixed_pattern = '([0-9]+)[(]([0-9]+)[/]([0-9]+)[)]'
    mixed_repl = '(\g<1>*\g<3>+\g<2>)/(\g<3>)'
    text = re.sub(mixed_pattern, mixed_repl, text)
    # Percentage p% -> (p/100), replacing every occurrence found.
    for pct in re.findall('(?!(?:\.)?%)\d+(?:\.\d+)?%', text):
        text = text.replace(pct, '(' + pct[:-1] + '/100)')
    # Percentage rewriting can produce the mixed-number shape again, so
    # run that substitution one more time.
    text = re.sub(mixed_pattern, mixed_repl, text)
    return text
joined = list(itertools.chain.from_iterable(decoded_list))
joined_refined = [re.sub('<pad>', '', x)[1:-4] for x in joined]
test_df['predicted'] = joined_refined
error_list = []
right_list = []
wrong_list = []
for idx, expr in enumerate(test_df["predicted"].tolist()):
try:
result1 = sympify(normalizetext(expr))
result2 = sympify(normalizetext(test_df["target_text"].loc[idx]))
if (result1 - result2)** 2 < 1e-5:
right_list.append((idx, result1, result2))
else:
wrong_list.append((idx, result1, result2))
except Exception as e:
error_list.append(idx)
print(e)
len(right_list) / len(test_df) * 100
len(wrong_list) / len(test_df) * 100
len(error_list) / len(test_df) * 100
| []
| []
| [
"TOKENIZERS_PARALLELISM"
]
| [] | ["TOKENIZERS_PARALLELISM"] | python | 1 | 0 | |
packet/client.go | package packet
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"runtime"
cacherClient "github.com/packethost/cacher/client"
"github.com/packethost/pkg/env"
"github.com/pkg/errors"
"github.com/tinkerbell/boots/httplog"
tinkClient "github.com/tinkerbell/tink/client"
tw "github.com/tinkerbell/tink/protos/workflow"
)
type hardwareGetter interface {
}
// Client has all the fields corresponding to connection
type Client struct {
http *http.Client
baseURL *url.URL
consumerToken string
authToken string
hardwareClient hardwareGetter
workflowClient tw.WorkflowServiceClient
}
// NewClient builds a Client whose hardware backend is selected by the
// DATA_MODEL_VERSION environment variable: "1" for the Tinkerbell
// backend, "" (unset) for the classic Packet API / Cacher backend, or
// "standalone" for a static JSON hardware database.
func NewClient(consumerToken, authToken string, baseURL *url.URL) (*Client, error) {
	t, ok := http.DefaultTransport.(*http.Transport)
	if !ok {
		return nil, errors.New("unexpected type for http.DefaultTransport")
	}
	// Clone the default transport so the idle-connection tuning below
	// does not leak into other users of http.DefaultTransport.
	transport := t.Clone()
	transport.MaxIdleConnsPerHost = env.Int("BOOTS_HTTP_HOST_CONNECTIONS", runtime.GOMAXPROCS(0)/2)
	c := &http.Client{
		Transport: &httplog.Transport{
			RoundTripper: transport,
		},
	}
	var hg hardwareGetter
	var wg tw.WorkflowServiceClient
	var err error
	dataModelVersion := os.Getenv("DATA_MODEL_VERSION")
	switch dataModelVersion {
	// Tinkerbell V1 backend
	case "1":
		hg, err = tinkClient.TinkHardwareClient()
		if err != nil {
			return nil, errors.Wrap(err, "connect to tink")
		}
		wg, err = tinkClient.TinkWorkflowClient()
		if err != nil {
			return nil, errors.Wrap(err, "connect to tink")
		}
	// classic Packet API / Cacher backend (default for empty envvar)
	case "":
		facility := os.Getenv("FACILITY_CODE")
		if facility == "" {
			return nil, errors.New("FACILITY_CODE env must be set")
		}
		hg, err = cacherClient.New(facility)
		if err != nil {
			return nil, errors.Wrap(err, "connect to cacher")
		}
	// standalone, use a json file for all hardware data
	case "standalone":
		saFile := os.Getenv("BOOTS_STANDALONE_JSON")
		if saFile == "" {
			return nil, errors.New("BOOTS_STANDALONE_JSON env must be set")
		}
		// set the baseURL from here so it gets returned in the client
		// NOTE: this intentionally overwrites the baseURL parameter.
		// TODO(@tobert): maybe there's a way to pass a file:// in the first place?
		baseURL, err = url.Parse("file://" + saFile)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to convert path %q to a URL as 'file://%s'", saFile, saFile)
		}
		saData, err := ioutil.ReadFile(saFile)
		if err != nil {
			return nil, errors.Wrapf(err, "could not read file %q", saFile)
		}
		dsDb := []DiscoverStandalone{}
		err = json.Unmarshal(saData, &dsDb)
		if err != nil {
			return nil, errors.Wrapf(err, "unable to parse configuration file %q", saFile)
		}
		// the "client" part is done - reading the json, now return a struct client
		// that is just the filename and parsed data structure
		hg = StandaloneClient{
			filename: saFile,
			db:       dsDb,
		}
	default:
		return nil, errors.Errorf("invalid DATA_MODEL_VERSION: %q", dataModelVersion)
	}
	return &Client{
		http:           c,
		baseURL:        baseURL,
		consumerToken:  consumerToken,
		authToken:      authToken,
		hardwareClient: hg,
		workflowClient: wg,
	}, nil
}
// NewMockClient builds a Client suitable for tests: it talks to the
// given base URL through a logging transport and uses the supplied mock
// workflow service client. No tokens or hardware backend are configured.
func NewMockClient(baseURL *url.URL, workflowClient tw.WorkflowServiceClient) *Client {
	httpClient := &http.Client{
		Transport: &httplog.Transport{
			RoundTripper: http.DefaultTransport,
		},
	}
	return &Client{
		http:           httpClient,
		workflowClient: workflowClient,
		baseURL:        baseURL,
	}
}
// Do resolves req.URL against the client base URL, attaches the staff
// and token headers, executes the request, and unmarshals the JSON
// response into v.
func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) error {
	req = req.WithContext(ctx)
	req.URL = c.baseURL.ResolveReference(req.URL)
	c.addHeaders(req)
	resp, doErr := c.http.Do(req)
	if doErr != nil {
		return errors.Wrap(doErr, "submit http request")
	}
	return unmarshalResponse(resp, v)
}
// Get issues a GET request against ref (resolved relative to the client
// base URL) and decodes the JSON response into v.
func (c *Client) Get(ctx context.Context, ref string, v interface{}) error {
	req, err := http.NewRequest("GET", ref, nil)
	if err != nil {
		return errors.Wrap(err, "setup GET request")
	}
	return c.Do(ctx, req, v)
}

// Patch issues a PATCH request with the given body and optional MIME
// content type, decoding the JSON response into v.
func (c *Client) Patch(ctx context.Context, ref, mime string, body io.Reader, v interface{}) error {
	return c.doWithBody(ctx, "PATCH", ref, mime, body, v)
}

// Post issues a POST request with the given body and optional MIME
// content type, decoding the JSON response into v.
func (c *Client) Post(ctx context.Context, ref, mime string, body io.Reader, v interface{}) error {
	return c.doWithBody(ctx, "POST", ref, mime, body, v)
}

// doWithBody builds a request of the given method with a body and an
// optional Content-Type header, then dispatches it through Do. It
// deduplicates the previously copy-pasted Patch/Post bodies; the wrapped
// error messages ("setup PATCH request", "setup POST request") are
// unchanged.
func (c *Client) doWithBody(ctx context.Context, method, ref, mime string, body io.Reader, v interface{}) error {
	req, err := http.NewRequest(method, ref, body)
	if err != nil {
		return errors.Wrapf(err, "setup %s request", method)
	}
	if mime != "" {
		req.Header.Set("Content-Type", mime)
	}
	return c.Do(ctx, req, v)
}
// addHeaders stamps the staff marker and, when configured, the consumer
// and auth tokens onto the outgoing request.
func (c *Client) addHeaders(req *http.Request) {
	headers := req.Header
	headers.Set("X-Packet-Staff", "1")
	if token := c.consumerToken; token != "" {
		headers.Set("X-Consumer-Token", token)
	}
	if token := c.authToken; token != "" {
		headers.Set("X-Auth-Token", token)
	}
}
// unmarshalResponse decodes the JSON body of res into result and maps
// non-2xx/3xx statuses to an *httpError. The body is always fully
// drained and then closed (deferred LIFO: Copy runs before Close) so the
// underlying connection can be reused.
func unmarshalResponse(res *http.Response, result interface{}) error {
	defer res.Body.Close()
	defer io.Copy(ioutil.Discard, res.Body) // ensure all of the body is read so we can quickly reuse connection
	// Treat anything outside 200-399 as an error and collect the error
	// payload (if any) from the body.
	if res.StatusCode < 200 || res.StatusCode > 399 {
		e := &httpError{
			StatusCode: res.StatusCode,
		}
		e.unmarshalErrors(res.Body)
		return errors.Wrap(e, "unmarshalling response")
	}
	var err error
	// A nil result means the caller does not care about the payload.
	if result == nil {
		return nil
	}
	err = errors.Wrap(json.NewDecoder(res.Body).Decode(result), "decode json body")
	if err == nil {
		return nil
	}
	// Decoding failed: surface the error wrapped with the status code.
	return errors.Wrap(&httpError{
		StatusCode: res.StatusCode,
		Errors:     []error{err},
	}, "unmarshalling response")
}
| [
"\"DATA_MODEL_VERSION\"",
"\"FACILITY_CODE\"",
"\"BOOTS_STANDALONE_JSON\""
]
| []
| [
"BOOTS_STANDALONE_JSON",
"DATA_MODEL_VERSION",
"FACILITY_CODE"
]
| [] | ["BOOTS_STANDALONE_JSON", "DATA_MODEL_VERSION", "FACILITY_CODE"] | go | 3 | 0 | |
pkg/nginx/template/template.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package template
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"net"
"net/url"
"os"
"os/exec"
"strconv"
"strings"
text_template "text/template"
"github.com/golang/glog"
"github.com/pborman/uuid"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/ingress-nginx/pkg/ingress"
"k8s.io/ingress-nginx/pkg/ingress/annotations/ratelimit"
ing_net "k8s.io/ingress-nginx/pkg/net"
"k8s.io/ingress-nginx/pkg/nginx/config"
"k8s.io/ingress-nginx/pkg/watch"
)
const (
slash = "/"
nonIdempotent = "non_idempotent"
defBufferSize = 65535
)
// Template ...
type Template struct {
tmpl *text_template.Template
fw watch.FileWatcher
s int
}
//NewTemplate returns a new Template instance or an
//error if the specified template file contains errors
func NewTemplate(file string, onChange func()) (*Template, error) {
tmpl, err := text_template.New("nginx.tmpl").Funcs(funcMap).ParseFiles(file)
if err != nil {
return nil, err
}
fw, err := watch.NewFileWatcher(file, onChange)
if err != nil {
return nil, err
}
return &Template{
tmpl: tmpl,
fw: fw,
s: defBufferSize,
}, nil
}
// Close removes the file watcher
func (t *Template) Close() {
t.fw.Close()
}
// Write populates a buffer using a template with NGINX configuration
// and the servers and upstreams created by Ingress rules
func (t *Template) Write(conf config.TemplateConfig) ([]byte, error) {
tmplBuf := bytes.NewBuffer(make([]byte, 0, t.s))
outCmdBuf := bytes.NewBuffer(make([]byte, 0, t.s))
defer func() {
if t.s < tmplBuf.Cap() {
glog.V(2).Infof("adjusting template buffer size from %v to %v", t.s, tmplBuf.Cap())
t.s = tmplBuf.Cap()
}
}()
if glog.V(3) {
b, err := json.Marshal(conf)
if err != nil {
glog.Errorf("unexpected error: %v", err)
}
glog.Infof("NGINX configuration: %v", string(b))
}
err := t.tmpl.Execute(tmplBuf, conf)
if err != nil {
return nil, err
}
// squeezes multiple adjacent empty lines to be single
// spaced this is to avoid the use of regular expressions
cmd := exec.Command("/ingress-controller/clean-nginx-conf.sh")
cmd.Stdin = tmplBuf
cmd.Stdout = outCmdBuf
if err := cmd.Run(); err != nil {
glog.Warningf("unexpected error cleaning template: %v", err)
return tmplBuf.Bytes(), nil
}
return outCmdBuf.Bytes(), nil
}
var (
funcMap = text_template.FuncMap{
"empty": func(input interface{}) bool {
check, ok := input.(string)
if ok {
return len(check) == 0
}
return true
},
"buildLocation": buildLocation,
"buildAuthLocation": buildAuthLocation,
"buildAuthResponseHeaders": buildAuthResponseHeaders,
"buildProxyPass": buildProxyPass,
"filterRateLimits": filterRateLimits,
"buildRateLimitZones": buildRateLimitZones,
"buildRateLimit": buildRateLimit,
"buildResolvers": buildResolvers,
"buildUpstreamName": buildUpstreamName,
"isLocationAllowed": isLocationAllowed,
"buildLogFormatUpstream": buildLogFormatUpstream,
"buildDenyVariable": buildDenyVariable,
"getenv": os.Getenv,
"contains": strings.Contains,
"hasPrefix": strings.HasPrefix,
"hasSuffix": strings.HasSuffix,
"toUpper": strings.ToUpper,
"toLower": strings.ToLower,
"formatIP": formatIP,
"buildNextUpstream": buildNextUpstream,
"getIngressInformation": getIngressInformation,
"serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} {
return struct{ First, Second interface{} }{all, server}
},
"isValidClientBodyBufferSize": isValidClientBodyBufferSize,
"buildForwardedFor": buildForwardedFor,
"buildAuthSignURL": buildAuthSignURL,
}
)
// formatIP wraps IPv6 addresses in [] and returns IPv4 addresses
// without modification. If the input cannot be parsed as an IP address
// it is returned without modification.
func formatIP(input string) string {
	parsed := net.ParseIP(input)
	switch {
	case parsed == nil:
		// Not an IP literal at all; pass through untouched.
		return input
	case parsed.To4() != nil:
		// IPv4 needs no bracketing.
		return input
	default:
		// IPv6 must be bracketed so it can be combined with a port.
		return fmt.Sprintf("[%s]", input)
	}
}
// buildResolvers renders an NGINX "resolver" directive from the
// nameservers read out of /etc/resolv.conf.
func buildResolvers(input interface{}) string {
	// NGINX needs IPv6 addresses to be surrounded by brackets.
	nss, ok := input.([]net.IP)
	if !ok {
		glog.Errorf("expected a '[]net.IP' type but %T was returned", input)
		return ""
	}
	if len(nss) == 0 {
		return ""
	}
	parts := make([]string, 0, len(nss)+2)
	parts = append(parts, "resolver")
	for _, ns := range nss {
		if ing_net.IsIPV6(ns) {
			parts = append(parts, fmt.Sprintf("[%v]", ns))
		} else {
			parts = append(parts, fmt.Sprintf("%v", ns))
		}
	}
	parts = append(parts, "valid=30s;")
	return strings.Join(parts, " ")
}
// buildLocation produces the location string, if the ingress has redirects
// (specified through the ingress.kubernetes.io/rewrite-to annotation).
// When a rewrite target differs from the path, the location becomes a
// case-insensitive regex ("~*") capturing the remainder of the URI into
// the named group "baseuri" for use by the rewrite rules.
func buildLocation(input interface{}) string {
	location, ok := input.(*ingress.Location)
	if !ok {
		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
		return slash
	}
	path := location.Path
	if len(location.Rewrite.Target) > 0 && location.Rewrite.Target != path {
		if path == slash {
			return fmt.Sprintf("~* %s", path)
		}
		// baseuri regex will parse basename from the given location
		baseuri := `(?<baseuri>.*)`
		if !strings.HasSuffix(path, slash) {
			// Not treat the slash after "location path" as a part of baseuri
			baseuri = fmt.Sprintf(`\/?%s`, baseuri)
		}
		return fmt.Sprintf(`~* ^%s%s`, path, baseuri)
	}
	// No rewrite: plain prefix location.
	return path
}
// TODO: Needs Unit Tests
// buildAuthLocation derives the internal NGINX location name used for
// the external-auth subrequest from the location path. Returns "" when
// no external auth URL is configured.
func buildAuthLocation(input interface{}) string {
	location, ok := input.(*ingress.Location)
	if !ok {
		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
		return ""
	}
	if location.ExternalAuth.URL == "" {
		return ""
	}
	// Encode the path and strip "=" padding so the result is a safe
	// location identifier.
	encoded := base64.URLEncoding.EncodeToString([]byte(location.Path))
	encoded = strings.Replace(encoded, "=", "", -1)
	return fmt.Sprintf("/_external-auth-%v", encoded)
}
func buildAuthResponseHeaders(input interface{}) []string {
location, ok := input.(*ingress.Location)
res := []string{}
if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return res
}
if len(location.ExternalAuth.ResponseHeaders) == 0 {
return res
}
for i, h := range location.ExternalAuth.ResponseHeaders {
hvar := strings.ToLower(h)
hvar = strings.NewReplacer("-", "_").Replace(hvar)
res = append(res, fmt.Sprintf("auth_request_set $authHeader%v $upstream_http_%v;", i, hvar))
res = append(res, fmt.Sprintf("proxy_set_header '%v' $authHeader%v;", h, i))
}
return res
}
func buildLogFormatUpstream(input interface{}) string {
cfg, ok := input.(config.Configuration)
if !ok {
glog.Errorf("expected a 'config.Configuration' type but %T was returned", input)
return ""
}
return cfg.BuildLogFormatUpstream()
}
// buildProxyPass produces the proxy pass string, if the ingress has redirects
// (specified through the ingress.kubernetes.io/rewrite-to annotation)
// If the annotation ingress.kubernetes.io/add-base-url:"true" is specified it will
// add a base tag in the head of the response from the service
func buildProxyPass(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend)
if !ok {
glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return ""
}
location, ok := loc.(*ingress.Location)
if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
path := location.Path
proto := "http"
upstreamName := location.Backend
for _, backend := range backends {
if backend.Name == location.Backend {
if backend.Secure || backend.SSLPassthrough {
proto = "https"
}
if isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) {
upstreamName = fmt.Sprintf("sticky-%v", upstreamName)
}
break
}
}
// defProxyPass returns the default proxy_pass, just the name of the upstream
defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName)
// if the path in the ingress rule is equals to the target: no special rewrite
if path == location.Rewrite.Target {
return defProxyPass
}
if !strings.HasSuffix(path, slash) {
path = fmt.Sprintf("%s/", path)
}
if len(location.Rewrite.Target) > 0 {
abu := ""
if location.Rewrite.AddBaseURL {
// path has a slash suffix, so that it can be connected with baseuri directly
bPath := fmt.Sprintf("%s%s", path, "$baseuri")
regex := `(<(?:H|h)(?:E|e)(?:A|a)(?:D|d)(?:[^">]|"[^"]*")*>)`
if len(location.Rewrite.BaseURLScheme) > 0 {
abu = fmt.Sprintf(`subs_filter '%v' '$1<base href="%v://$http_host%v">' ro;
`, regex, location.Rewrite.BaseURLScheme, bPath)
} else {
abu = fmt.Sprintf(`subs_filter '%v' '$1<base href="$scheme://$http_host%v">' ro;
`, regex, bPath)
}
}
if location.Rewrite.Target == slash {
// special case redirect to /
// ie /something to /
return fmt.Sprintf(`
rewrite %s(.*) /$1 break;
rewrite %s / break;
proxy_pass %s://%s;
%v`, path, location.Path, proto, upstreamName, abu)
}
return fmt.Sprintf(`
rewrite %s(.*) %s/$1 break;
proxy_pass %s://%s;
%v`, path, location.Rewrite.Target, proto, upstreamName, abu)
}
// default proxy_pass
return defProxyPass
}
// TODO: Needs Unit Tests
func filterRateLimits(input interface{}) []ratelimit.RateLimit {
ratelimits := []ratelimit.RateLimit{}
found := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
glog.Errorf("expected a '[]ratelimit.RateLimit' type but %T was returned", input)
return ratelimits
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.ID != "" && !found.Has(loc.RateLimit.ID) {
found.Insert(loc.RateLimit.ID)
ratelimits = append(ratelimits, loc.RateLimit)
}
}
}
return ratelimits
}
// TODO: Needs Unit Tests
// buildRateLimitZones produces an array of limit_conn_zone in order to allow
// rate limiting of request. Each Ingress rule could have up to three zones, one
// for connection limit by IP address, one for limiting requests per minute, and
// one for limiting requests per second.
func buildRateLimitZones(input interface{}) []string {
zones := sets.String{}
servers, ok := input.([]*ingress.Server)
if !ok {
glog.Errorf("expected a '[]*ingress.Server' type but %T was returned", input)
return zones.List()
}
for _, server := range servers {
for _, loc := range server.Locations {
if loc.RateLimit.Connections.Limit > 0 {
zone := fmt.Sprintf("limit_conn_zone $limit_%s zone=%v:%vm;",
loc.RateLimit.ID,
loc.RateLimit.Connections.Name,
loc.RateLimit.Connections.SharedSize)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPM.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/m;",
loc.RateLimit.ID,
loc.RateLimit.RPM.Name,
loc.RateLimit.RPM.SharedSize,
loc.RateLimit.RPM.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
if loc.RateLimit.RPS.Limit > 0 {
zone := fmt.Sprintf("limit_req_zone $limit_%s zone=%v:%vm rate=%vr/s;",
loc.RateLimit.ID,
loc.RateLimit.RPS.Name,
loc.RateLimit.RPS.SharedSize,
loc.RateLimit.RPS.Limit)
if !zones.Has(zone) {
zones.Insert(zone)
}
}
}
}
return zones.List()
}
// buildRateLimit produces an array of limit_req to be used inside the Path of
// Ingress rules. The order: connections by IP first, then RPS, and RPM last.
func buildRateLimit(input interface{}) []string {
limits := []string{}
loc, ok := input.(*ingress.Location)
if !ok {
glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
return limits
}
if loc.RateLimit.Connections.Limit > 0 {
limit := fmt.Sprintf("limit_conn %v %v;",
loc.RateLimit.Connections.Name, loc.RateLimit.Connections.Limit)
limits = append(limits, limit)
}
if loc.RateLimit.RPS.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPS.Name, loc.RateLimit.RPS.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.RPM.Limit > 0 {
limit := fmt.Sprintf("limit_req zone=%v burst=%v nodelay;",
loc.RateLimit.RPM.Name, loc.RateLimit.RPM.Burst)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRateAfter > 0 {
limit := fmt.Sprintf("limit_rate_after %vk;",
loc.RateLimit.LimitRateAfter)
limits = append(limits, limit)
}
if loc.RateLimit.LimitRate > 0 {
limit := fmt.Sprintf("limit_rate %vk;",
loc.RateLimit.LimitRate)
limits = append(limits, limit)
}
return limits
}
// isLocationAllowed reports whether the location has no deny reason set.
func isLocationAllowed(input interface{}) bool {
	location, ok := input.(*ingress.Location)
	if !ok {
		glog.Errorf("expected an '*ingress.Location' type but %T was returned", input)
		return false
	}
	return location.Denied == nil
}
var (
denyPathSlugMap = map[string]string{}
)
// buildDenyVariable returns a nginx variable for a location in a
// server to be used in the whitelist check
// This method uses a unique id generator library to reduce the
// size of the string to be used as a variable in nginx to avoid
// issue with the size of the variable bucket size directive
func buildDenyVariable(a interface{}) string {
l, ok := a.(string)
if !ok {
glog.Errorf("expected a 'string' type but %T was returned", a)
return ""
}
if _, ok := denyPathSlugMap[l]; !ok {
denyPathSlugMap[l] = buildRandomUUID()
}
return fmt.Sprintf("$deny_%v", denyPathSlugMap[l])
}
// TODO: Needs Unit Tests
func buildUpstreamName(host string, b interface{}, loc interface{}) string {
backends, ok := b.([]*ingress.Backend)
if !ok {
glog.Errorf("expected an '[]*ingress.Backend' type but %T was returned", b)
return ""
}
location, ok := loc.(*ingress.Location)
if !ok {
glog.Errorf("expected a '*ingress.Location' type but %T was returned", loc)
return ""
}
upstreamName := location.Backend
for _, backend := range backends {
if backend.Name == location.Backend {
if backend.SessionAffinity.AffinityType == "cookie" &&
isSticky(host, location, backend.SessionAffinity.CookieSessionAffinity.Locations) {
upstreamName = fmt.Sprintf("sticky-%v", upstreamName)
}
break
}
}
return upstreamName
}
// TODO: Needs Unit Tests
// isSticky reports whether the given location path of host is registered
// as a sticky-session location.
func isSticky(host string, loc *ingress.Location, stickyLocations map[string][]string) bool {
	// Ranging over a missing map key yields a nil slice, so no explicit
	// existence check is needed.
	for _, stickyPath := range stickyLocations[host] {
		if stickyPath == loc.Path {
			return true
		}
	}
	return false
}
// buildNextUpstream normalizes a proxy_next_upstream directive value:
// empty tokens are dropped, the special "non_idempotent" token is moved
// to the end, and it is forced on when retryNonIdempotent is enabled.
func buildNextUpstream(i, r interface{}) string {
	nextUpstream, ok := i.(string)
	if !ok {
		glog.Errorf("expected a 'string' type but %T was returned", i)
		return ""
	}
	// Checked assertion: the previous unchecked r.(bool) panicked when
	// the template passed a non-bool. A missing/invalid flag now logs
	// and defaults to false, matching the style of the sibling helpers.
	retryNonIdempotent, ok := r.(bool)
	if !ok {
		glog.Errorf("expected a 'bool' type but %T was returned", r)
	}
	parts := strings.Split(nextUpstream, " ")
	nextUpstreamCodes := make([]string, 0, len(parts))
	for _, v := range parts {
		if v != "" && v != nonIdempotent {
			nextUpstreamCodes = append(nextUpstreamCodes, v)
		}
		if v == nonIdempotent {
			retryNonIdempotent = true
		}
	}
	if retryNonIdempotent {
		nextUpstreamCodes = append(nextUpstreamCodes, nonIdempotent)
	}
	return strings.Join(nextUpstreamCodes, " ")
}
// buildRandomUUID returns a random identifier (a UUID with its dashes
// removed) to be used in the template.
func buildRandomUUID() string {
	return strings.Replace(uuid.New(), "-", "", -1)
}
// isValidClientBodyBufferSize checks that the configured
// client-body-buffer-size is a plain integer (bytes) or an integer with
// a case-insensitive "k" or "m" suffix, as accepted by NGINX.
func isValidClientBodyBufferSize(input interface{}) bool {
	s, ok := input.(string)
	if !ok {
		glog.Errorf("expected an 'string' type but %T was returned", input)
		return false
	}
	if s == "" {
		return false
	}
	// Bare integers are valid as-is.
	if _, err := strconv.Atoi(s); err == nil {
		return true
	}
	// Otherwise a single "k"/"m" suffix is allowed; this folds the two
	// previously copy-pasted TrimSuffix/Atoi branches into one loop.
	lower := strings.ToLower(s)
	for _, suffix := range []string{"k", "m"} {
		trimmed := strings.TrimSuffix(lower, suffix)
		if trimmed == lower {
			continue
		}
		if _, err := strconv.Atoi(trimmed); err == nil {
			return true
		}
	}
	glog.Errorf("client-body-buffer-size '%v' was provided in an incorrect format, hence it will not be set.", s)
	return false
}
type ingressInformation struct {
Namespace string
Rule string
Service string
Annotations map[string]string
}
func getIngressInformation(i, p interface{}) *ingressInformation {
ing, ok := i.(*extensions.Ingress)
if !ok {
glog.Errorf("expected an '*extensions.Ingress' type but %T was returned", i)
return &ingressInformation{}
}
path, ok := p.(string)
if !ok {
glog.Errorf("expected a 'string' type but %T was returned", p)
return &ingressInformation{}
}
if ing == nil {
return &ingressInformation{}
}
info := &ingressInformation{
Namespace: ing.GetNamespace(),
Rule: ing.GetName(),
Annotations: ing.Annotations,
}
if ing.Spec.Backend != nil {
info.Service = ing.Spec.Backend.ServiceName
}
for _, rule := range ing.Spec.Rules {
if rule.HTTP == nil {
continue
}
for _, rPath := range rule.HTTP.Paths {
if path == rPath.Path {
info.Service = rPath.Backend.ServiceName
return info
}
}
}
return info
}
// buildForwardedFor maps a header name like "X-Forwarded-For" to the
// corresponding NGINX variable ("$http_x_forwarded_for").
func buildForwardedFor(input interface{}) string {
	headerName, ok := input.(string)
	if !ok {
		glog.Errorf("expected a 'string' type but %T was returned", input)
		return ""
	}
	normalized := strings.ToLower(strings.Replace(headerName, "-", "_", -1))
	return fmt.Sprintf("$http_%v", normalized)
}
// buildAuthSignURL appends the "rd" (redirect) query parameter to an
// auth-sign URL so that, after external authentication, the user is sent
// back to the originally requested page. If the URL already carries an
// "rd" parameter it is returned unchanged.
func buildAuthSignURL(input interface{}) string {
	s, ok := input.(string)
	if !ok {
		glog.Errorf("expected an 'string' type but %T was returned", input)
		return ""
	}
	u, err := url.Parse(s)
	if err != nil {
		// Previously the parse error was discarded, leaving u nil and
		// panicking on u.Query() below for malformed URLs.
		glog.Errorf("expected a valid URL but %v could not be parsed: %v", s, err)
		return ""
	}
	q := u.Query()
	if len(q) == 0 {
		// No query string yet: start one.
		return fmt.Sprintf("%v?rd=$pass_access_scheme://$http_host$request_uri", s)
	}
	if q.Get("rd") != "" {
		// Redirect target already provided; keep the URL as-is.
		return s
	}
	return fmt.Sprintf("%v&rd=$pass_access_scheme://$http_host$request_uri", s)
}
| []
| []
| []
| [] | [] | go | 0 | 0 | |
core/src/main/java/com/energyxxer/inject/utils/MinecraftUtils.java | package com.energyxxer.inject.utils;
import java.io.File;
/**
* Created by User on 4/11/2017.
*/
/**
 * Helper for locating the default Minecraft installation directory on the
 * current operating system.
 */
public class MinecraftUtils {

    /**
     * Returns the platform-specific default Minecraft directory:
     * {@code %AppData%\.minecraft} on Windows,
     * {@code ~/Library/Application Support/minecraft} on macOS,
     * and {@code ~/.minecraft} on other systems.
     */
    public static String getDefaultMinecraftDir() {
        String os = (System.getProperty("os.name")).toUpperCase();
        String baseDir;
        if (os.contains("WIN")) {
            // Windows keeps game data under the roaming AppData folder.
            baseDir = System.getenv("AppData");
        } else {
            // Linux and macOS both start from the user's home directory.
            baseDir = System.getProperty("user.home");
            if (os.contains("MAC")) {
                baseDir += "/Library/Application Support";
            }
        }
        // macOS uses "minecraft" without the leading dot; other systems hide it.
        if (os.contains("MAC")) {
            return baseDir + File.separator + "minecraft";
        }
        return baseDir + File.separator + ".minecraft";
    }

    private MinecraftUtils() {
        // Static utility class; not instantiable.
    }
}
| [
"\"AppData\""
]
| []
| [
"AppData"
]
| [] | ["AppData"] | java | 1 | 0 | |
test/e2e/cli_test.go | package e2e
import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/test/e2e/fixtures"
)
// CLISuite exercises the argo CLI end-to-end against a live cluster.
type CLISuite struct {
	fixtures.E2ESuite
}
// BeforeTest resets per-test suite state and clears ARGO_SERVER/ARGO_TOKEN
// so each test talks to the cluster directly unless it opts in explicitly.
func (s *CLISuite) BeforeTest(suiteName, testName string) {
	s.E2ESuite.BeforeTest(suiteName, testName)
	_ = os.Unsetenv("ARGO_SERVER")
	_ = os.Unsetenv("ARGO_TOKEN")
}
// testNeedsOffloading skips the current test when node-status offloading
// would be required (persistence enabled) but no Argo Server is configured
// to serve the offloaded status.
func (s *CLISuite) testNeedsOffloading() {
	if s.Persistence.IsEnabled() && os.Getenv("ARGO_SERVER") == "" {
		// Fixed grammar of the skip message ("not" -> "no").
		s.T().Skip("test needs offloading, but no Argo Server available")
	}
}
// TestCompletion checks that bash shell completion can be generated.
func (s *CLISuite) TestCompletion() {
	s.Given().RunCli([]string{"completion", "bash"}, func(t *testing.T, output string, err error) {
		assert.NoError(t, err)
		assert.Contains(t, output, "bash completion for argo")
	})
}
// TestVersion checks that the version command runs without error.
func (s *CLISuite) TestVersion() {
	// check we can run this without error
	s.Given().
		RunCli([]string{"version"}, func(t *testing.T, output string, err error) {
			assert.NoError(t, err)
		})
}
// TestSubmitDryRun verifies a client-side dry-run renders the workflow
// without creating it, so no UID is assigned.
func (s *CLISuite) TestSubmitDryRun() {
	s.Given().
		RunCli([]string{"submit", "smoke/basic.yaml", "--dry-run", "-o", "yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "name: basic")
				// dry-run should never get a UID
				assert.NotContains(t, output, "uid:")
			}
		})
}
// TestSubmitServerDryRun verifies a server-side dry-run: the rendered
// output carries a UID, unlike a client-side dry-run.
func (s *CLISuite) TestSubmitServerDryRun() {
	s.Given().
		RunCli([]string{"submit", "smoke/basic.yaml", "--server-dry-run", "-o", "yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "name: basic")
				// server-dry-run should get a UID
				assert.Contains(t, output, "uid:")
			}
		})
}
// TestTokenArg verifies --token authentication: a bogus token fails while
// a real service-account token succeeds. Only runs when CI=true.
func (s *CLISuite) TestTokenArg() {
	if os.Getenv("CI") != "true" {
		s.T().SkipNow()
	}
	s.Run("ListWithBadToken", func() {
		s.Given().RunCli([]string{"list", "--user", "fake_token_user", "--token", "badtoken"}, func(t *testing.T, output string, err error) {
			assert.Error(t, err)
		})
	})
	var goodToken string
	s.Run("GetSAToken", func() {
		token, err := s.GetServiceAccountToken()
		assert.NoError(s.T(), err)
		goodToken = token
	})
	s.Run("ListWithGoodToken", func() {
		s.Given().RunCli([]string{"list", "--user", "fake_token_user", "--token", goodToken}, func(t *testing.T, output string, err error) {
			assert.NoError(t, err)
			assert.Contains(t, output, "NAME")
			assert.Contains(t, output, "STATUS")
		})
	})
}
// TestLogs covers the various modes of `argo logs`: following workflow and
// pod logs, selecting a container, time/line filters, and reading logs of
// an already-completed workflow.
func (s *CLISuite) TestLogs() {
	s.Given().
		Workflow(`@smoke/basic.yaml`).
		When().
		SubmitWorkflow().
		WaitForWorkflowToStart(5*time.Second).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.Nodes.FindByDisplayName("basic") != nil
		}, "pod running", 10*time.Second)
	s.Run("FollowWorkflowLogs", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "--follow"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, ":) Hello Argo!")
				}
			})
	})
	s.Run("FollowPodLogs", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "basic", "--follow"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, ":) Hello Argo!")
				}
			})
	})
	s.Run("ContainerLogs", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "basic", "-c", "wait"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "Executor")
				}
			})
	})
	// The filter sub-tests assert the message is absent, i.e. filtered out.
	s.Run("Since", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "--since=1s"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.NotContains(t, output, ":) Hello Argo!")
				}
			})
	})
	s.Run("SinceTime", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "--since-time=" + time.Now().Format(time.RFC3339)}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.NotContains(t, output, ":) Hello Argo!")
				}
			})
	})
	s.Run("TailLines", func() {
		s.Given().
			RunCli([]string{"logs", "basic", "--tail=0"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.NotContains(t, output, ":) Hello Argo!")
				}
			})
	})
	s.Run("CompletedWorkflow", func() {
		s.Given().
			WorkflowName("basic").
			When().
			WaitForWorkflow(10*time.Second).
			Then().
			RunCli([]string{"logs", "basic", "--tail=10"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, ":) Hello Argo!")
				}
			})
	})
}
// TestLogProblems asserts that log lines arrive in the order they were
// emitted, both while following a running workflow and when reading back
// a completed one.
// this test probably should be in the ArgoServerSuite, but it's just much easier to write the test
// for the CLI
func (s *CLISuite) TestLogProblems() {
	s.Given().
		Workflow(`@testdata/log-problems.yaml`).
		When().
		SubmitWorkflow().
		WaitForWorkflowToStart(5*time.Second).
		Then().
		// logs should come in order
		RunCli([]string{"logs", "log-problems", "--follow"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				lines := strings.Split(output, "\n")
				if assert.Len(t, lines, 6) {
					assert.Contains(t, lines[0], "one")
					assert.Contains(t, lines[1], "two")
					assert.Contains(t, lines[2], "three")
					assert.Contains(t, lines[3], "four")
					assert.Contains(t, lines[4], "five")
				}
			}
		}).
		When().
		// Next check that all log entries and received and in the correct order.
		WaitForWorkflow(30*time.Second).
		Then().
		RunCli([]string{"logs", "log-problems"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				lines := strings.Split(output, "\n")
				if assert.Len(t, lines, 6) {
					assert.Contains(t, lines[0], "one")
					assert.Contains(t, lines[1], "two")
					assert.Contains(t, lines[2], "three")
					assert.Contains(t, lines[3], "four")
					assert.Contains(t, lines[4], "five")
				}
			}
		})
}
// TestRoot covers the top-level submit/list/get commands and submitting a
// workflow from an existing CronWorkflow resource.
func (s *CLISuite) TestRoot() {
	s.Run("Submit", func() {
		s.Given().RunCli([]string{"submit", "smoke/basic.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "ServiceAccount:")
				assert.Contains(t, output, "Status:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("List", func() {
		s.testNeedsOffloading()
		for i := 0; i < 3; i++ {
			s.Given().
				Workflow("@smoke/basic-generate-name.yaml").
				When().
				SubmitWorkflow().
				WaitForWorkflow(20 * time.Second)
		}
		s.Given().RunCli([]string{"list", "--chunk-size", "1"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "NAME")
				assert.Contains(t, output, "STATUS")
				assert.Contains(t, output, "AGE")
				assert.Contains(t, output, "DURATION")
				assert.Contains(t, output, "PRIORITY")
			}
		})
	})
	s.Run("Get", func() {
		s.testNeedsOffloading()
		s.Given().RunCli([]string{"get", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "ServiceAccount:")
				assert.Contains(t, output, "Status:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	var createdWorkflowName string
	s.Run("From", func() {
		s.Given().CronWorkflow("@cron/basic.yaml").
			When().
			CreateCronWorkflow().
			RunCli([]string{"submit", "--from", "cronwf/test-cron-wf-basic", "-l", "argo-e2e=true"}, func(t *testing.T, output string, err error) {
				assert.NoError(t, err)
				assert.Contains(t, output, "Name: test-cron-wf-basic-")
				r := regexp.MustCompile(`Name:\s+?(test-cron-wf-basic-[a-z0-9]+)`)
				res := r.FindStringSubmatch(output)
				if len(res) != 2 {
					// assert.Fail does not abort the closure; return here so
					// res[1] below cannot panic with index-out-of-range.
					assert.Fail(t, "Internal test error, please report a bug")
					return
				}
				createdWorkflowName = res[1]
			}).
			WaitForWorkflowName(createdWorkflowName, 30*time.Second).
			Then().
			ExpectWorkflowName(createdWorkflowName, func(t *testing.T, metadata *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
				assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
			})
	})
}
// TestWorkflowSuspendResume suspends a running workflow, resumes it, and
// checks it then completes successfully.
func (s *CLISuite) TestWorkflowSuspendResume() {
	s.testNeedsOffloading()
	s.Given().
		Workflow("@testdata/sleep-3s.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflowToStart(10*time.Second).
		RunCli([]string{"suspend", "sleep-3s"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow sleep-3s suspended")
			}
		}).
		RunCli([]string{"resume", "sleep-3s"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow sleep-3s resumed")
			}
		}).
		WaitForWorkflow(20 * time.Second).
		Then().
		ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
			assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
		})
}
// TestNodeSuspendResume resumes one suspended node by field selector, then
// stops a second suspended node with a message, and verifies the workflow
// fails with that message recorded on the stopped node.
func (s *CLISuite) TestNodeSuspendResume() {
	s.testNeedsOffloading()
	s.Given().
		Workflow("@testdata/node-suspend.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.AnyActiveSuspendNode()
		}, "suspended node", 30*time.Second).
		RunCli([]string{"resume", "node-suspend", "--node-field-selector", "inputs.parameters.tag.value=suspend1-tag1"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow node-suspend resumed")
			}
		}).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.AnyActiveSuspendNode()
		}, "suspended node", 10*time.Second).
		RunCli([]string{"stop", "node-suspend", "--node-field-selector", "inputs.parameters.tag.value=suspend2-tag1", "--message", "because"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow node-suspend stopped")
			}
		}).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.Phase == wfv1.NodeFailed
		}, "suspended node", 10*time.Second).
		Then().
		ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
			if assert.Equal(t, wfv1.NodeFailed, status.Phase) {
				r := regexp.MustCompile(`child '(node-suspend-[0-9]+)' failed`)
				res := r.FindStringSubmatch(status.Message)
				// assert.Equal takes (t, expected, actual); the original had
				// them swapped. Guard the match before indexing res[1] so a
				// mismatch fails the assertion instead of panicking.
				if assert.Equal(t, 2, len(res)) {
					assert.Equal(t, "because", status.Nodes[res[1]].Message)
				}
			}
		})
}
// TestWorkflowDeleteByName deletes a completed workflow by its exact name.
func (s *CLISuite) TestWorkflowDeleteByName() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		RunCli([]string{"delete", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Workflow 'basic' deleted")
			}
		})
}
// TestWorkflowDeleteDryRun checks --dry-run reports a deletion without performing it.
func (s *CLISuite) TestWorkflowDeleteDryRun() {
	s.Given().
		When().
		RunCli([]string{"delete", "--dry-run", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Workflow 'basic' deleted (dry-run)")
			}
		})
}
// TestWorkflowDeleteNothing checks that `delete` with no arguments deletes nothing.
func (s *CLISuite) TestWorkflowDeleteNothing() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		RunCli([]string{"delete"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.NotContains(t, output, "deleted")
			}
		})
}
// TestWorkflowDeleteNotFound checks deleting a missing workflow reports "not found".
func (s *CLISuite) TestWorkflowDeleteNotFound() {
	s.Given().
		When().
		RunCli([]string{"delete", "not-found"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Workflow 'not-found' not found")
			}
		})
}
// TestWorkflowDeleteAll checks --all with a label selector deletes the workflow.
func (s *CLISuite) TestWorkflowDeleteAll() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		Given().
		RunCli([]string{"delete", "--all", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Workflow 'basic' deleted")
			}
		})
}
// TestWorkflowDeleteCompleted checks --completed skips running workflows and
// deletes them only once they finish.
func (s *CLISuite) TestWorkflowDeleteCompleted() {
	s.Given().
		Workflow("@testdata/sleep-3s.yaml").
		When().
		SubmitWorkflow().
		Given().
		RunCli([]string{"delete", "--completed", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				// nothing should be deleted yet
				assert.NotContains(t, output, "deleted")
			}
		}).
		When().
		WaitForWorkflow(30*time.Second).
		Given().
		RunCli([]string{"delete", "--completed", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "deleted")
			}
		})
}
// TestWorkflowDeleteOlder checks the --older age filter.
func (s *CLISuite) TestWorkflowDeleteOlder() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		Given().
		RunCli([]string{"delete", "--older", "1d", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				// nothing over a day should be deleted
				assert.NotContains(t, output, "deleted")
			}
		}).
		RunCli([]string{"delete", "--older", "0s", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "deleted")
			}
		})
}
// TestWorkflowDeleteByPrefix checks the --prefix name filter.
func (s *CLISuite) TestWorkflowDeleteByPrefix() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		Given().
		RunCli([]string{"delete", "--prefix", "missing-prefix", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				// nothing should be deleted
				assert.NotContains(t, output, "deleted")
			}
		}).
		RunCli([]string{"delete", "--prefix", "basic", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "deleted")
			}
		})
}
// TestWorkflowLint covers `argo lint` over single files, directories,
// template references, and expected validation failures.
func (s *CLISuite) TestWorkflowLint() {
	s.Run("LintFile", func() {
		s.Given().RunCli([]string{"lint", "smoke/basic.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "smoke/basic.yaml is valid")
			}
		})
	})
	s.Run("LintFileEmptyParamDAG", func() {
		s.Given().RunCli([]string{"lint", "expectedfailures/empty-parameter-dag.yaml"}, func(t *testing.T, output string, err error) {
			if assert.EqualError(t, err, "exit status 1") {
				assert.Contains(t, output, "templates.abc.tasks.a templates.whalesay inputs.parameters.message was not supplied")
			}
		})
	})
	s.Run("LintFileEmptyParamSteps", func() {
		s.Given().RunCli([]string{"lint", "expectedfailures/empty-parameter-steps.yaml"}, func(t *testing.T, output string, err error) {
			if assert.EqualError(t, err, "exit status 1") {
				assert.Contains(t, output, "templates.abc.steps[0].a templates.whalesay inputs.parameters.message was not supplied")
			}
		})
	})
	s.Run("LintFileWithTemplate", func() {
		s.Given().
			WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml").
			When().
			CreateWorkflowTemplates().
			Given().
			RunCli([]string{"lint", "smoke/hello-world-workflow-tmpl.yaml"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "smoke/hello-world-workflow-tmpl.yaml is valid")
				}
			})
	})
	s.Run("LintDir", func() {
		// Copy a known-valid workflow into a temp dir and lint the directory.
		tmp, err := ioutil.TempDir("", "")
		s.CheckError(err)
		defer func() { _ = os.RemoveAll(tmp) }()
		// Read all content of src to data
		data, err := ioutil.ReadFile("smoke/basic.yaml")
		s.CheckError(err)
		// Write data to dst
		err = ioutil.WriteFile(filepath.Join(tmp, "my-workflow.yaml"), data, 0644)
		s.CheckError(err)
		s.Given().
			RunCli([]string{"lint", tmp}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "my-workflow.yaml is valid")
				}
			})
	})
	s.Run("Different Kind", func() {
		s.Given().
			RunCli([]string{"lint", "testdata/workflow-template-nested-template.yaml"}, func(t *testing.T, output string, err error) {
				if assert.Error(t, err) {
					assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind Workflow. Ignoring...")
					assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
				}
			})
	})
	s.Run("Valid", func() {
		s.Given().
			RunCli([]string{"lint", "testdata/exit-1.yaml"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "exit-1.yaml is valid")
				}
			})
	})
	s.Run("Invalid", func() {
		s.Given().
			RunCli([]string{"lint", "expectedfailures/empty-parameter-dag.yaml"}, func(t *testing.T, output string, err error) {
				if assert.Error(t, err) {
					assert.Contains(t, output, "Error in file expectedfailures/empty-parameter-dag.yaml:")
				}
			})
	})
	// Not all files in this directory are Workflows, expect failure
	s.Run("NotAllWorkflows", func() {
		s.Given().
			RunCli([]string{"lint", "testdata"}, func(t *testing.T, output string, err error) {
				if assert.Error(t, err) {
					assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind Workflow. Ignoring...")
					assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
				}
			})
	})
	// All files in this directory are Workflows, expect success
	s.Run("AllWorkflows", func() {
		s.Given().
			RunCli([]string{"lint", "stress"}, func(t *testing.T, output string, err error) {
				assert.NoError(t, err)
			})
	})
}
// TestWorkflowRetry terminates a workflow mid-run and retries it with
// --restart-successful, verifying the selected successful node re-runs
// (finishes after the retry time) while untouched nodes keep their
// original finish time.
func (s *CLISuite) TestWorkflowRetry() {
	s.testNeedsOffloading()
	var retryTime corev1.Time
	s.Given().
		Workflow("@testdata/retry-test.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflowToStart(5*time.Second).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.AnyActiveSuspendNode()
		}, "suspended node", 30*time.Second).
		RunCli([]string{"terminate", "retry-test"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow retry-test terminated")
			}
		}).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			retryTime = wf.Status.FinishedAt
			return wf.Status.Phase == wfv1.NodeFailed
		}, "terminated", 20*time.Second).
		RunCli([]string{"retry", "retry-test", "--restart-successful", "--node-field-selector", "templateName==steps-inner"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
			}
		}).
		WaitForWorkflowCondition(func(wf *wfv1.Workflow) bool {
			return wf.Status.AnyActiveSuspendNode()
		}, "suspended node", 20*time.Second).
		Then().
		ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
			outerStepsPodNode := status.Nodes.FindByDisplayName("steps-outer-step1")
			innerStepsPodNode := status.Nodes.FindByDisplayName("steps-inner-step1")
			// FindByDisplayName returns nil when the node is absent; guard so
			// a missing node fails the assertion instead of panicking and
			// taking down the whole test binary.
			if assert.NotNil(t, outerStepsPodNode) && assert.NotNil(t, innerStepsPodNode) {
				assert.True(t, outerStepsPodNode.FinishedAt.Before(&retryTime))
				assert.True(t, retryTime.Before(&innerStepsPodNode.FinishedAt))
			}
		})
}
// TestWorkflowTerminate checks `argo terminate` reports the termination.
func (s *CLISuite) TestWorkflowTerminate() {
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		Given().
		RunCli([]string{"terminate", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "workflow basic terminated")
			}
		})
}
// TestWorkflowWait checks `argo wait` blocks until completion and reports it.
func (s *CLISuite) TestWorkflowWait() {
	s.testNeedsOffloading()
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		Given().
		RunCli([]string{"wait", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "basic Succeeded")
			}
		})
}
// TestWorkflowWatch checks `argo watch` renders the workflow status.
func (s *CLISuite) TestWorkflowWatch() {
	s.testNeedsOffloading()
	s.Given().
		Workflow("@smoke/basic.yaml").
		When().
		SubmitWorkflow().
		Given().
		RunCli([]string{"watch", "basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
			}
		})
}
// TestTemplate covers the `argo template` subcommands (lint/create/list/
// get/delete) and submitting a workflow from a WorkflowTemplate.
func (s *CLISuite) TestTemplate() {
	s.Run("Lint", func() {
		s.Given().RunCli([]string{"template", "lint", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "validated")
			}
		})
	})
	s.Run("Create", func() {
		s.Given().RunCli([]string{"template", "create", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("List", func() {
		s.Given().RunCli([]string{"template", "list"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "NAME")
			}
		})
	})
	s.Run("Get", func() {
		s.Given().RunCli([]string{"template", "get", "not-found"}, func(t *testing.T, output string, err error) {
			if assert.EqualError(t, err, "exit status 1") {
				assert.Contains(t, output, `"not-found" not found`)
			}
		}).RunCli([]string{"template", "get", "workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("Submittable-Template", func() {
		s.testNeedsOffloading()
		s.Given().RunCli([]string{"submit", "--from", "workflowtemplate/workflow-template-whalesay-template", "-l", "argo-e2e=true"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
		var workflowName string
		s.Given().RunCli([]string{"list"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				r := regexp.MustCompile(`\s+?(workflow-template-whalesay-template-[a-z0-9]+)`)
				res := r.FindStringSubmatch(output)
				if len(res) != 2 {
					// assert.Fail does not abort the closure; return here so
					// res[1] below cannot panic with index-out-of-range.
					assert.Fail(t, "Internal test error, please report a bug")
					return
				}
				workflowName = res[1]
			}
		})
		s.Given().
			WorkflowName(workflowName).
			When().
			WaitForWorkflow(30*time.Second).
			RunCli([]string{"get", workflowName}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, workflowName)
					assert.Contains(t, output, "Succeeded")
				}
			})
	})
	s.Run("Delete", func() {
		s.Given().RunCli([]string{"template", "delete", "workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
			assert.NoError(t, err)
		})
	})
}
// TestWorkflowResubmit resubmits a failed workflow with --memoized and
// checks the CLI reports the new workflow's details.
func (s *CLISuite) TestWorkflowResubmit() {
	s.Given().
		Workflow("@testdata/exit-1.yaml").
		When().
		SubmitWorkflow().
		WaitForWorkflow(30*time.Second).
		Given().
		RunCli([]string{"resubmit", "--memoized", "exit-1"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "ServiceAccount:")
				assert.Contains(t, output, "Status:")
				assert.Contains(t, output, "Created:")
			}
		})
}
// TestCron covers the `argo cron` subcommands: lint (files, directories,
// and wrong-kind inputs), create (including --schedule override), list,
// suspend/resume, get, and delete.
func (s *CLISuite) TestCron() {
	s.Run("Lint", func() {
		s.Given().RunCli([]string{"cron", "lint", "cron/basic.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "cron/basic.yaml is valid")
				assert.Contains(t, output, "Cron workflow manifests validated")
			}
		})
	})
	s.Run("Different Kind", func() {
		s.Given().
			RunCli([]string{"cron", "lint", "testdata/workflow-template-nested-template.yaml"}, func(t *testing.T, output string, err error) {
				if assert.Error(t, err) {
					assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind CronWorkflow. Ignoring...")
					assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
				}
			})
	})
	// Not all files in this directory are CronWorkflows, expect failure
	s.Run("NotAllWorkflows", func() {
		s.Given().
			RunCli([]string{"cron", "lint", "testdata"}, func(t *testing.T, output string, err error) {
				if assert.Error(t, err) {
					assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind CronWorkflow. Ignoring...")
					assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
				}
			})
	})
	// All files in this directory are CronWorkflows, expect success
	s.Run("AllCron", func() {
		s.Given().
			RunCli([]string{"cron", "lint", "cron"}, func(t *testing.T, output string, err error) {
				assert.NoError(t, err)
			})
	})
	s.Run("Create", func() {
		s.Given().RunCli([]string{"cron", "create", "cron/basic.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
				assert.Contains(t, output, "Schedule:")
				assert.Contains(t, output, "Suspended:")
				assert.Contains(t, output, "StartingDeadlineSeconds:")
				assert.Contains(t, output, "ConcurrencyPolicy:")
			}
		})
	})
	s.Run("Delete", func() {
		s.Given().RunCli([]string{"cron", "delete", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
			assert.NoError(t, err)
		})
	})
	s.Run("Create Schedule Override", func() {
		s.Given().RunCli([]string{"cron", "create", "cron/basic.yaml", "--schedule", "1 2 3 * *"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Schedule: 1 2 3 * *")
			}
		})
	})
	s.Run("List", func() {
		s.Given().RunCli([]string{"cron", "list"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "NAME")
				assert.Contains(t, output, "AGE")
				assert.Contains(t, output, "LAST RUN")
				assert.Contains(t, output, "SCHEDULE")
				assert.Contains(t, output, "SUSPENDED")
			}
		})
	})
	s.Run("Suspend", func() {
		s.Given().RunCli([]string{"cron", "suspend", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "CronWorkflow 'test-cron-wf-basic' suspended")
			}
		})
	})
	s.Run("Resume", func() {
		s.Given().RunCli([]string{"cron", "resume", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "CronWorkflow 'test-cron-wf-basic' resumed")
			}
		})
	})
	s.Run("Get", func() {
		s.Given().RunCli([]string{"cron", "get", "not-found"}, func(t *testing.T, output string, err error) {
			if assert.EqualError(t, err, "exit status 1") {
				// NOTE(review): this expects backslash-escaped quotes in the
				// output, unlike TestTemplate's "Get" which expects plain
				// quotes — confirm which form the CLI actually emits here.
				assert.Contains(t, output, `\"not-found\" not found`)
			}
		}).RunCli([]string{"cron", "get", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
				assert.Contains(t, output, "Schedule:")
				assert.Contains(t, output, "Suspended:")
				assert.Contains(t, output, "StartingDeadlineSeconds:")
				assert.Contains(t, output, "ConcurrencyPolicy:")
			}
		})
	})
}
// TestClusterTemplateCommands covers the `argo cluster-template`
// create/get/list/delete subcommands.
func (s *CLISuite) TestClusterTemplateCommands() {
	s.Run("Create", func() {
		s.Given().
			RunCli([]string{"cluster-template", "create", "smoke/cluster-workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
				}
			})
	})
	s.Run("Get", func() {
		s.Given().
			RunCli([]string{"cluster-template", "get", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
				}
			})
	})
	s.Run("list", func() {
		s.Given().
			RunCli([]string{"cluster-template", "list"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
				}
			})
	})
	s.Run("Delete", func() {
		s.Given().
			RunCli([]string{"cluster-template", "delete", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
				if assert.NoError(t, err) {
					assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
				}
			})
	})
}
// TestWorkflowTemplateRefSubmit submits workflows that reference a
// namespaced WorkflowTemplate and a ClusterWorkflowTemplate.
func (s *CLISuite) TestWorkflowTemplateRefSubmit() {
	s.Run("CreateWFT", func() {
		s.Given().RunCli([]string{"template", "create", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("CreateWF", func() {
		s.Given().RunCli([]string{"submit", "testdata/workflow-template-ref.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("CreateCWFT", func() {
		s.Given().RunCli([]string{"cluster-template", "create", "smoke/cluster-workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
	s.Run("CreateWFWithCWFTRef", func() {
		s.Given().RunCli([]string{"submit", "testdata/cluster-workflow-template-ref.yaml"}, func(t *testing.T, output string, err error) {
			if assert.NoError(t, err) {
				assert.Contains(t, output, "Name:")
				assert.Contains(t, output, "Namespace:")
				assert.Contains(t, output, "Created:")
			}
		})
	})
}
// TestCLISuite wires the suite into the standard `go test` runner.
func TestCLISuite(t *testing.T) {
	suite.Run(t, new(CLISuite))
}
| [
"\"ARGO_SERVER\"",
"\"CI\""
]
| []
| [
"ARGO_SERVER",
"CI"
]
| [] | ["ARGO_SERVER", "CI"] | go | 2 | 0 | |
examples/mesh_static_rhino_ansys.py | import rhinoscriptsyntax as rs
import compas_fea
from compas_fea.cad import rhino
from compas.datastructures import Mesh
from compas_fea.structure import Structure
from compas_fea.structure import FixedDisplacement
from compas_fea.structure import ElasticIsotropic
from compas_fea.structure import ShellSection
from compas_fea.structure import ElementProperties
from compas_fea.structure import GravityLoad
from compas_fea.structure import GeneralStep
from compas_rhino.helpers import mesh_from_guid
# Author(s): Tomás Méndez Echenagucia (github.com/tmsmendez)

# Static (gravity) analysis of a shell structure meshed in Rhino,
# solved with Ansys via compas_fea, with results plotted back in Rhino.

# get mesh from rhino layer ----------------------------------------------------

mesh = mesh_from_guid(Mesh, rs.ObjectsByLayer('mesh')[0])

# add shell elements from mesh -------------------------------------------------

name = 'shell_example'
s = Structure(name=name, path=compas_fea.TEMP)
shell_keys = s.add_nodes_elements_from_mesh(mesh, element_type='ShellElement')
s.add_set('shell', 'element', shell_keys)

# add supports from rhino layer ------------------------------------------------

pts = rs.ObjectsByLayer('pts')
pts = [rs.PointCoordinates(pt) for pt in pts]
# Map each support point to its existing structure node key.
nkeys = [s.check_node_exists(pt) for pt in pts]
s.add_set(name='support_nodes', type='NODE', selection=nkeys)
# Fixed local variable name typo: "supppots" -> "supports".
supports = FixedDisplacement(name='supports', nodes='support_nodes')
s.add_displacement(supports)

# add materials and sections ---------------------------------------------------

E = 40 * 10 ** 9   # Young's modulus (SI units assumed: Pa) -- TODO confirm
v = .02            # Poisson's ratio
p = 2400           # density (presumably kg/m^3 for concrete) -- TODO confirm
thickness = .02    # shell thickness (presumably m)
matname = 'concrete'
concrete = ElasticIsotropic(name=matname, E=E, v=v, p=p)
s.add_material(concrete)
section = ShellSection(name='concrete_sec', t=thickness)
s.add_section(section)
prop = ElementProperties(name='floor', material=matname, section='concrete_sec', elsets=['shell'])
s.add_element_properties(prop)

# add gravity load -------------------------------------------------------------

s.add_load(GravityLoad(name='load_gravity', elements=['shell']))

# add steps --------------------------------------------------------------------

step = GeneralStep(name='gravity_step',
                   nlgeom=False,
                   displacements=['supports'],
                   loads=['load_gravity'],
                   type='static')
s.add_steps([step])
s.steps_order = ['gravity_step']

# analyse ----------------------------------------------------------------------

fields = 'all'
s.write_input_file(software='ansys', fields=fields)
s.analyse(software='ansys', cpus=4, delete=True)
s.extract_data(software='ansys', fields=fields, steps='last')

# visualise results ------------------------------------------------------------

rhino.plot_data(s, step='gravity_step', field='uz', scale=100, colorbar_size=0.3)
rhino.plot_reaction_forces(s, step='gravity_step', scale=.001)
| []
| []
| [] | [] | python | null | null | null |
conda/base/context.py | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from errno import ENOENT
from logging import getLogger
import os
from os.path import abspath, basename, expanduser, isdir, isfile, join, split as path_split
import platform
import sys
from .constants import (APP_NAME, ChannelPriority, DEFAULTS_CHANNEL_NAME, REPODATA_FN,
DEFAULT_AGGRESSIVE_UPDATE_PACKAGES, DEFAULT_CHANNELS,
DEFAULT_CHANNEL_ALIAS, DEFAULT_CUSTOM_CHANNELS, DepsModifier,
ERROR_UPLOAD_URL, PLATFORM_DIRECTORIES, PREFIX_MAGIC_FILE, PathConflict,
ROOT_ENV_NAME, SEARCH_PATH, SafetyChecks, SatSolverChoice, UpdateModifier)
from .. import __version__ as CONDA_VERSION
from .._vendor.appdirs import user_data_dir
from .._vendor.auxlib.decorators import memoize, memoizedproperty
from .._vendor.auxlib.ish import dals
from .._vendor.boltons.setutils import IndexedSet
from .._vendor.frozendict import frozendict
from .._vendor.toolz import concat, concatv, unique
from ..common.compat import NoneType, iteritems, itervalues, odict, on_win, string_types
from ..common.configuration import (Configuration, ConfigurationLoadError, MapParameter,
ParameterLoader, PrimitiveParameter, SequenceParameter,
ValidationError)
from ..common._os.linux import linux_get_libc_version
from ..common.path import expand, paths_equal
from ..common.url import has_scheme, path_to_url, split_scheme_auth_token
from ..common.decorators import env_override
from .. import CONDA_PACKAGE_ROOT
try:
    os.getcwd()
except (IOError, OSError) as e:
    if e.errno != ENOENT:
        raise
    # FileNotFoundError can occur when cwd has been deleted out from underneath
    # the process.  To resolve #6584, fall back to sys.prefix as the cwd.
    os.chdir(sys.prefix)
log = getLogger(__name__)
# map sys.platform values to conda's platform names
_platform_map = {
    'linux2': 'linux',
    'linux': 'linux',
    'darwin': 'osx',
    'win32': 'win',
    'zos': 'zos',
}
# platform.machine() values on Linux that are not x86 variants
non_x86_linux_machines = {
    'armv6l',
    'armv7l',
    'aarch64',
    'ppc64le',
}
# pointer width in bits -> conda arch name
_arch_names = {
    32: 'x86',
    64: 'x86_64',
}
# standard .condarc locations: per-user file and interpreter-prefix (system) file
user_rc_path = abspath(expanduser('~/.condarc'))
sys_rc_path = join(sys.prefix, '.condarc')
def mockable_context_envs_dirs(root_writable, root_prefix, _envs_dirs):
    """Return the ordered, de-duplicated tuple of envs directories.

    Configured dirs come first; the built-in fallbacks follow, preferring
    ``<root_prefix>/envs`` when the root prefix is writable and ``~/.conda/envs``
    otherwise.  Kept as a module-level function so tests can mock it.
    """
    if root_writable:
        fixed_dirs = [
            join(root_prefix, 'envs'),
            join('~', '.conda', 'envs'),
        ]
    else:
        fixed_dirs = [
            join('~', '.conda', 'envs'),
            join(root_prefix, 'envs'),
        ]
    if on_win:
        # Windows additionally gets a per-user app-data envs directory.
        fixed_dirs.append(join(user_data_dir(APP_NAME, APP_NAME), 'envs'))
    return tuple(IndexedSet(expand(p) for p in concatv(_envs_dirs, tuple(fixed_dirs))))
def channel_alias_validation(value):
    """Configuration validator: a non-empty channel_alias must carry a scheme.

    Returns True when valid, otherwise an error-message string.
    """
    if not value or has_scheme(value):
        return True
    return "channel_alias value '%s' must have scheme/protocol." % value
def default_python_default():
    """Return the running interpreter's 'major.minor' version string."""
    major, minor = sys.version_info[:2]
    return '%d.%d' % (major, minor)
def default_python_validation(value):
    """Configuration validator for the ``default_python`` setting.

    Accepts the empty value (no python pinning) or a 'MAJOR.MINOR' string with
    a major version of 2 or 3, e.g. '2.7', '3.8', '3.10'.  Returns True when
    valid, otherwise an error-message string.
    """
    if value:
        # BUGFIX: was `len(value) == 3`, which rejected valid two-digit minor
        # versions such as '3.10'.  float() still rejects strings with more
        # than one dot (e.g. '3.1.2').
        if len(value) >= 3 and value[1] == '.':
            try:
                value = float(value)
                if 2.0 <= value < 4.0:
                    return True
            except ValueError:  # pragma: no cover
                pass
    else:
        # Set to None or '' meaning no python pinning
        return True
    return "default_python value '%s' not of the form '[23].[0-9][0-9]?' or ''" % value
def ssl_verify_validation(value):
    """Configuration validator for ``ssl_verify``.

    Booleans are always valid; a string must point at an existing certificate
    bundle file or directory.  Returns True when valid, otherwise an
    error-message string.
    """
    if not isinstance(value, string_types):
        return True
    if isfile(value) or isdir(value):
        return True
    return ("ssl_verify value '%s' must be a boolean, a path to a "
            "certificate bundle file, or a path to a directory containing "
            "certificates of trusted CAs." % value)
class Context(Configuration):
add_pip_as_python_dependency = ParameterLoader(PrimitiveParameter(True))
allow_conda_downgrades = ParameterLoader(PrimitiveParameter(False))
# allow cyclical dependencies, or raise
allow_cycles = ParameterLoader(PrimitiveParameter(True))
allow_softlinks = ParameterLoader(PrimitiveParameter(False))
auto_update_conda = ParameterLoader(PrimitiveParameter(True), aliases=('self_update',))
auto_activate_base = ParameterLoader(PrimitiveParameter(True))
auto_stack = ParameterLoader(PrimitiveParameter(0))
notify_outdated_conda = ParameterLoader(PrimitiveParameter(True))
clobber = ParameterLoader(PrimitiveParameter(False))
changeps1 = ParameterLoader(PrimitiveParameter(True))
env_prompt = ParameterLoader(PrimitiveParameter("({default_env}) "))
create_default_packages = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types)))
default_python = ParameterLoader(
PrimitiveParameter(default_python_default(),
element_type=string_types + (NoneType,),
validation=default_python_validation))
download_only = ParameterLoader(PrimitiveParameter(False))
enable_private_envs = ParameterLoader(PrimitiveParameter(False))
force_32bit = ParameterLoader(PrimitiveParameter(False))
non_admin_enabled = ParameterLoader(PrimitiveParameter(True))
pip_interop_enabled = ParameterLoader(PrimitiveParameter(False))
# multithreading in various places
_default_threads = ParameterLoader(PrimitiveParameter(0, element_type=int),
aliases=('default_threads',))
_repodata_threads = ParameterLoader(PrimitiveParameter(0, element_type=int),
aliases=('repodata_threads',))
_verify_threads = ParameterLoader(PrimitiveParameter(0, element_type=int),
aliases=('verify_threads',))
# this one actually defaults to 1 - that is handled in the property below
_execute_threads = ParameterLoader(PrimitiveParameter(0, element_type=int),
aliases=('execute_threads',))
# Safety & Security
_aggressive_update_packages = ParameterLoader(
SequenceParameter(
PrimitiveParameter("", element_type=string_types),
DEFAULT_AGGRESSIVE_UPDATE_PACKAGES),
aliases=('aggressive_update_packages',))
safety_checks = ParameterLoader(PrimitiveParameter(SafetyChecks.warn))
extra_safety_checks = ParameterLoader(PrimitiveParameter(False))
path_conflict = ParameterLoader(PrimitiveParameter(PathConflict.clobber))
pinned_packages = ParameterLoader(SequenceParameter(
PrimitiveParameter("", element_type=string_types),
string_delimiter='&')) # TODO: consider a different string delimiter # NOQA
disallowed_packages = ParameterLoader(
SequenceParameter(
PrimitiveParameter("", element_type=string_types), string_delimiter='&'),
aliases=('disallow',))
rollback_enabled = ParameterLoader(PrimitiveParameter(True))
track_features = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types)))
use_index_cache = ParameterLoader(PrimitiveParameter(False))
separate_format_cache = ParameterLoader(PrimitiveParameter(False))
_root_prefix = ParameterLoader(PrimitiveParameter(""), aliases=('root_dir', 'root_prefix'))
_envs_dirs = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types),
string_delimiter=os.pathsep),
aliases=('envs_dirs', 'envs_path'),
expandvars=True)
_pkgs_dirs = ParameterLoader(SequenceParameter(PrimitiveParameter("", string_types)),
aliases=('pkgs_dirs',),
expandvars=True)
_subdir = ParameterLoader(PrimitiveParameter(''), aliases=('subdir',))
_subdirs = ParameterLoader(
SequenceParameter(PrimitiveParameter("", string_types)), aliases=('subdirs',))
local_repodata_ttl = ParameterLoader(PrimitiveParameter(1, element_type=(bool, int)))
# number of seconds to cache repodata locally
# True/1: respect Cache-Control max-age header
# False/0: always fetch remote repodata (HTTP 304 responses respected)
# remote connection details
ssl_verify = ParameterLoader(
PrimitiveParameter(True,
element_type=string_types + (bool,),
validation=ssl_verify_validation),
aliases=('verify_ssl',),
expandvars=True)
client_ssl_cert = ParameterLoader(
PrimitiveParameter(None, element_type=string_types + (NoneType,)),
aliases=('client_cert',),
expandvars=True)
client_ssl_cert_key = ParameterLoader(
PrimitiveParameter(None, element_type=string_types + (NoneType,)),
aliases=('client_cert_key',),
expandvars=True)
proxy_servers = ParameterLoader(
MapParameter(PrimitiveParameter(None, string_types + (NoneType,))),
expandvars=True)
remote_connect_timeout_secs = ParameterLoader(PrimitiveParameter(9.15))
remote_read_timeout_secs = ParameterLoader(PrimitiveParameter(60.))
remote_max_retries = ParameterLoader(PrimitiveParameter(3))
remote_backoff_factor = ParameterLoader(PrimitiveParameter(1))
add_anaconda_token = ParameterLoader(PrimitiveParameter(True), aliases=('add_binstar_token',))
# #############################
# channels
# #############################
allow_non_channel_urls = ParameterLoader(PrimitiveParameter(False))
_channel_alias = ParameterLoader(
PrimitiveParameter(DEFAULT_CHANNEL_ALIAS,
validation=channel_alias_validation),
aliases=('channel_alias',))
channel_priority = ParameterLoader(PrimitiveParameter(ChannelPriority.FLEXIBLE))
_channels = ParameterLoader(
SequenceParameter(PrimitiveParameter(
"", element_type=string_types), default=(DEFAULTS_CHANNEL_NAME,)),
aliases=('channels', 'channel',),
expandvars=True) # channel for args.channel
_custom_channels = ParameterLoader(
MapParameter(PrimitiveParameter("", element_type=string_types), DEFAULT_CUSTOM_CHANNELS),
aliases=('custom_channels',),
expandvars=True)
_custom_multichannels = ParameterLoader(
MapParameter(SequenceParameter(PrimitiveParameter("", element_type=string_types))),
aliases=('custom_multichannels',),
expandvars=True)
_default_channels = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types), DEFAULT_CHANNELS),
aliases=('default_channels',),
expandvars=True)
_migrated_channel_aliases = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types)),
aliases=('migrated_channel_aliases',))
migrated_custom_channels = ParameterLoader(
MapParameter(PrimitiveParameter("", element_type=string_types)),
expandvars=True) # TODO: also take a list of strings
override_channels_enabled = ParameterLoader(PrimitiveParameter(True))
show_channel_urls = ParameterLoader(PrimitiveParameter(None, element_type=(bool, NoneType)))
use_local = ParameterLoader(PrimitiveParameter(False))
whitelist_channels = ParameterLoader(
SequenceParameter(PrimitiveParameter("", element_type=string_types)),
expandvars=True)
restore_free_channel = ParameterLoader(PrimitiveParameter(False))
repodata_fns = ParameterLoader(
SequenceParameter(
PrimitiveParameter("", element_type=string_types),
("current_repodata.json", REPODATA_FN)))
_use_only_tar_bz2 = ParameterLoader(PrimitiveParameter(None, element_type=(bool, NoneType)),
aliases=('use_only_tar_bz2',))
always_softlink = ParameterLoader(PrimitiveParameter(False), aliases=('softlink',))
always_copy = ParameterLoader(PrimitiveParameter(False), aliases=('copy',))
always_yes = ParameterLoader(
PrimitiveParameter(None, element_type=(bool, NoneType)), aliases=('yes',))
debug = ParameterLoader(PrimitiveParameter(False))
dev = ParameterLoader(PrimitiveParameter(False))
dry_run = ParameterLoader(PrimitiveParameter(False))
error_upload_url = ParameterLoader(PrimitiveParameter(ERROR_UPLOAD_URL))
force = ParameterLoader(PrimitiveParameter(False))
json = ParameterLoader(PrimitiveParameter(False))
offline = ParameterLoader(PrimitiveParameter(False))
quiet = ParameterLoader(PrimitiveParameter(False))
ignore_pinned = ParameterLoader(PrimitiveParameter(False))
report_errors = ParameterLoader(PrimitiveParameter(None, element_type=(bool, NoneType)))
shortcuts = ParameterLoader(PrimitiveParameter(True))
_verbosity = ParameterLoader(
PrimitiveParameter(0, element_type=int), aliases=('verbose', 'verbosity'))
# ######################################################
# ## Solver Configuration ##
# ######################################################
deps_modifier = ParameterLoader(PrimitiveParameter(DepsModifier.NOT_SET))
update_modifier = ParameterLoader(PrimitiveParameter(UpdateModifier.UPDATE_SPECS))
sat_solver = ParameterLoader(PrimitiveParameter(SatSolverChoice.PYCOSAT))
solver_ignore_timestamps = ParameterLoader(PrimitiveParameter(False))
# # CLI-only
# no_deps = ParameterLoader(PrimitiveParameter(NULL, element_type=(type(NULL), bool)))
# # CLI-only
# only_deps = ParameterLoader(PrimitiveParameter(NULL, element_type=(type(NULL), bool)))
#
# freeze_installed = ParameterLoader(PrimitiveParameter(False))
# update_deps = ParameterLoader(PrimitiveParameter(False), aliases=('update_dependencies',))
# update_specs = ParameterLoader(PrimitiveParameter(False))
# update_all = ParameterLoader(PrimitiveParameter(False))
force_remove = ParameterLoader(PrimitiveParameter(False))
force_reinstall = ParameterLoader(PrimitiveParameter(False))
target_prefix_override = ParameterLoader(PrimitiveParameter(''))
unsatisfiable_hints = ParameterLoader(PrimitiveParameter(True))
# conda_build
bld_path = ParameterLoader(PrimitiveParameter(''))
anaconda_upload = ParameterLoader(
PrimitiveParameter(None, element_type=(bool, NoneType)), aliases=('binstar_upload',))
_croot = ParameterLoader(PrimitiveParameter(''), aliases=('croot',))
_conda_build = ParameterLoader(
MapParameter(PrimitiveParameter("", element_type=string_types)),
aliases=('conda-build', 'conda_build'))
    def __init__(self, search_path=None, argparse_args=None):
        # search_path: ordered condarc file locations to load; defaults to SEARCH_PATH.
        # argparse_args: parsed CLI namespace; '-p'/'-n' flags may seed CONDA_PREFIX below.
        if search_path is None:
            search_path = SEARCH_PATH
        if argparse_args:
            # This block of code sets CONDA_PREFIX based on '-n' and '-p' flags, so that
            # configuration can be properly loaded from those locations
            func_name = ('func' in argparse_args and argparse_args.func or '').rsplit('.', 1)[-1]
            if func_name in ('create', 'install', 'update', 'remove', 'uninstall', 'upgrade'):
                if 'prefix' in argparse_args and argparse_args.prefix:
                    os.environ['CONDA_PREFIX'] = argparse_args.prefix
                elif 'name' in argparse_args and argparse_args.name:
                    # Currently, usage of the '-n' flag is inefficient, with all configuration
                    # files being loaded/re-loaded at least two times.
                    # NOTE(review): 'context' here is the module-level singleton, which already
                    # exists while this new instance is being constructed.
                    target_prefix = determine_target_prefix(context, argparse_args)
                    if target_prefix != context.root_prefix:
                        os.environ['CONDA_PREFIX'] = determine_target_prefix(context,
                                                                             argparse_args)
        super(Context, self).__init__(search_path=search_path, app_name=APP_NAME,
                                      argparse_args=argparse_args)
def post_build_validation(self):
errors = []
if self.client_ssl_cert_key and not self.client_ssl_cert:
error = ValidationError('client_ssl_cert', self.client_ssl_cert, "<<merged>>",
"'client_ssl_cert' is required when 'client_ssl_cert_key' "
"is defined")
errors.append(error)
if self.always_copy and self.always_softlink:
error = ValidationError('always_copy', self.always_copy, "<<merged>>",
"'always_copy' and 'always_softlink' are mutually exclusive. "
"Only one can be set to 'True'.")
errors.append(error)
return errors
@property
def conda_build_local_paths(self):
# does file system reads to make sure paths actually exist
return tuple(unique(full_path for full_path in (
expand(d) for d in (
self._croot,
self.bld_path,
self.conda_build.get('root-dir'),
join(self.root_prefix, 'conda-bld'),
'~/conda-bld',
) if d
) if isdir(full_path)))
@property
def conda_build_local_urls(self):
return tuple(path_to_url(p) for p in self.conda_build_local_paths)
@property
def croot(self):
"""This is where source caches and work folders live"""
if self._croot:
return abspath(expanduser(self._croot))
elif self.bld_path:
return abspath(expanduser(self.bld_path))
elif 'root-dir' in self.conda_build:
return abspath(expanduser(self.conda_build['root-dir']))
elif self.root_writable:
return join(self.root_prefix, 'conda-bld')
else:
return expand('~/conda-bld')
    @property
    def local_build_root(self):
        # alias for croot, kept for callers that use the older name
        return self.croot
@property
def conda_build(self):
# conda-build needs its config map to be mutable
try:
return self.__conda_build
except AttributeError:
self.__conda_build = __conda_build = dict(self._conda_build)
return __conda_build
@property
def arch_name(self):
m = platform.machine()
if self.platform == 'linux' and m in non_x86_linux_machines:
return m
else:
return _arch_names[self.bits]
    @property
    def conda_private(self):
        # True when conda is installed in a "private env" layout; see conda_in_private_env
        return conda_in_private_env()
    @property
    def platform(self):
        # conda's platform name ('linux', 'osx', 'win', 'zos'); 'unknown' otherwise
        return _platform_map.get(sys.platform, 'unknown')
@property
def default_threads(self):
return self._default_threads if self._default_threads else None
@property
def repodata_threads(self):
return self._repodata_threads if self._repodata_threads else self.default_threads
@property
def verify_threads(self):
if self._verify_threads:
threads = self._verify_threads
elif self.default_threads:
threads = self.default_threads
else:
threads = 1
return threads
@property
def execute_threads(self):
if self._execute_threads:
threads = self._execute_threads
elif self.default_threads:
threads = self.default_threads
else:
threads = 1
return threads
@property
def subdir(self):
if self._subdir:
return self._subdir
m = platform.machine()
if m in non_x86_linux_machines:
return 'linux-%s' % m
elif self.platform == 'zos':
return 'zos-z'
else:
return '%s-%d' % (self.platform, self.bits)
@property
def subdirs(self):
return self._subdirs if self._subdirs else (self.subdir, 'noarch')
@memoizedproperty
def known_subdirs(self):
return frozenset(concatv(PLATFORM_DIRECTORIES, self.subdirs))
@property
def bits(self):
if self.force_32bit:
return 32
else:
return 8 * tuple.__itemsize__
    @property
    def root_dir(self):
        # root_dir is an alias for root_prefix, we prefer the name "root_prefix"
        # because it is more consistent with other names
        return self.root_prefix
@property
def root_writable(self):
# rather than using conda.gateways.disk.test.prefix_is_writable
# let's shortcut and assume the root prefix exists
path = join(self.root_prefix, PREFIX_MAGIC_FILE)
if isfile(path):
try:
fh = open(path, 'a+')
except (IOError, OSError) as e:
log.debug(e)
return False
else:
fh.close()
return True
return False
    @property
    def envs_dirs(self):
        # ordered tuple of directories searched for named environments
        return mockable_context_envs_dirs(self.root_writable, self.root_prefix, self._envs_dirs)
@property
def pkgs_dirs(self):
if self._pkgs_dirs:
return tuple(IndexedSet(expand(p) for p in self._pkgs_dirs))
else:
cache_dir_name = 'pkgs32' if context.force_32bit else 'pkgs'
fixed_dirs = (
self.root_prefix,
join('~', '.conda'),
)
if on_win:
fixed_dirs += user_data_dir(APP_NAME, APP_NAME),
return tuple(IndexedSet(expand(join(p, cache_dir_name)) for p in (fixed_dirs)))
    @memoizedproperty
    def trash_dir(self):
        # The '.trash' directory inside the first writable package cache,
        # created on first access.
        # TODO: this inline import can be cleaned up by moving pkgs_dir write detection logic
        from ..core.package_cache_data import PackageCacheData
        pkgs_dir = PackageCacheData.first_writable().pkgs_dir
        trash_dir = join(pkgs_dir, '.trash')
        from ..gateways.disk.create import mkdir_p
        mkdir_p(trash_dir)  # ensure the directory exists before handing it out
        return trash_dir
@property
def default_prefix(self):
if self.active_prefix:
return self.active_prefix
_default_env = os.getenv('CONDA_DEFAULT_ENV')
if _default_env in (None, ROOT_ENV_NAME, 'root'):
return self.root_prefix
elif os.sep in _default_env:
return abspath(_default_env)
else:
for envs_dir in self.envs_dirs:
default_prefix = join(envs_dir, _default_env)
if isdir(default_prefix):
return default_prefix
return join(self.envs_dirs[0], _default_env)
    @property
    def active_prefix(self):
        # prefix of the currently activated environment, or None
        return os.getenv('CONDA_PREFIX')
    @property
    def shlvl(self):
        # shell activation nesting level; -1 when CONDA_SHLVL is unset
        return int(os.getenv('CONDA_SHLVL', -1))
@property
def aggressive_update_packages(self):
from ..models.match_spec import MatchSpec
return tuple(MatchSpec(s) for s in self._aggressive_update_packages)
    @property
    def target_prefix(self):
        # used for the prefix that is the target of the command currently being executed
        # different from the active prefix, which is sometimes given by -p or -n command line flags
        return determine_target_prefix(self)
@memoizedproperty
def root_prefix(self):
if self._root_prefix:
return abspath(expanduser(self._root_prefix))
elif conda_in_private_env():
return abspath(join(self.conda_prefix, '..', '..'))
else:
return self.conda_prefix
    @property
    def conda_prefix(self):
        # prefix of the environment the running conda is installed in
        return abspath(sys.prefix)
@property
# This is deprecated, please use conda_exe_vars_dict instead.
def conda_exe(self):
bin_dir = 'Scripts' if on_win else 'bin'
exe = 'conda.exe' if on_win else 'conda'
return join(self.conda_prefix, bin_dir, exe)
@property
def conda_exe_vars_dict(self):
'''
An OrderedDict so the vars can refer to each other if necessary.
None means unset it.
'''
if context.dev:
return OrderedDict([('CONDA_EXE', sys.executable),
('PYTHONPATH', os.path.dirname(CONDA_PACKAGE_ROOT) + '{}{}'.format(
os.pathsep, os.environ.get('PYTHONPATH', ''))),
('_CE_M', '-m'),
('_CE_CONDA', 'conda'),
('CONDA_PYTHON_EXE', sys.executable)])
else:
bin_dir = 'Scripts' if on_win else 'bin'
exe = 'conda.exe' if on_win else 'conda'
# I was going to use None to indicate a variable to unset, but that gets tricky with
# error-on-undefined.
return OrderedDict([('CONDA_EXE', os.path.join(sys.prefix, bin_dir, exe)),
('_CE_M', ''),
('_CE_CONDA', ''),
('CONDA_PYTHON_EXE', sys.executable)])
    @memoizedproperty
    def channel_alias(self):
        # the configured channel_alias string parsed into a Channel object,
        # with any scheme/auth/token split out
        from ..models.channel import Channel
        location, scheme, auth, token = split_scheme_auth_token(self._channel_alias)
        return Channel(scheme=scheme, auth=auth, location=location, token=token)
@property
def migrated_channel_aliases(self):
from ..models.channel import Channel
return tuple(Channel(scheme=scheme, auth=auth, location=location, token=token)
for location, scheme, auth, token in
(split_scheme_auth_token(c) for c in self._migrated_channel_aliases))
@property
def prefix_specified(self):
return (self._argparse_args.get("prefix") is not None
or self._argparse_args.get("name") is not None)
    @memoizedproperty
    def default_channels(self):
        # the format for 'default_channels' is a list of strings that either
        #   - start with a scheme
        #   - are meant to be prepended with channel_alias
        return self.custom_multichannels[DEFAULTS_CHANNEL_NAME]
    @memoizedproperty
    def custom_multichannels(self):
        # Mapping of multichannel name -> tuple of Channel objects.  The
        # reserved 'defaults' and 'local' multichannels are merged over any
        # user-configured ones.
        from ..models.channel import Channel
        default_channels = list(self._default_channels)
        if self.restore_free_channel:
            # re-insert the legacy 'free' channel just after the first default
            default_channels.insert(1, 'https://repo.anaconda.com/pkgs/free')
        reserved_multichannel_urls = odict((
            (DEFAULTS_CHANNEL_NAME, default_channels),
            ('local', self.conda_build_local_urls),
        ))
        reserved_multichannels = odict(
            (name, tuple(
                Channel.make_simple_channel(self.channel_alias, url) for url in urls)
             ) for name, urls in iteritems(reserved_multichannel_urls)
        )
        custom_multichannels = odict(
            (name, tuple(
                Channel.make_simple_channel(self.channel_alias, url) for url in urls)
             ) for name, urls in iteritems(self._custom_multichannels)
        )
        all_multichannels = odict(
            (name, channels)
            for name, channels in concat(map(iteritems, (
                custom_multichannels,
                reserved_multichannels,  # reserved comes last, so reserved overrides custom
            )))
        )
        return all_multichannels
@memoizedproperty
def custom_channels(self):
from ..models.channel import Channel
custom_channels = (Channel.make_simple_channel(self.channel_alias, url, name)
for name, url in iteritems(self._custom_channels))
channels_from_multichannels = concat(channel for channel
in itervalues(self.custom_multichannels))
all_channels = odict((x.name, x) for x in (ch for ch in concatv(
channels_from_multichannels,
custom_channels,
)))
return all_channels
    @property
    def channels(self):
        # The effective channel list for the current command, honoring
        # --override-channels, --use-local, and -c/--channel CLI flags.
        local_add = ('local',) if self.use_local else ()
        if (self._argparse_args and 'override_channels' in self._argparse_args
                and self._argparse_args['override_channels']):
            if not self.override_channels_enabled:
                from ..exceptions import OperationNotAllowed
                raise OperationNotAllowed(dals("""
                Overriding channels has been disabled.
                """))
            elif not (self._argparse_args and 'channel' in self._argparse_args
                      and self._argparse_args['channel']):
                from ..exceptions import CommandArgumentError
                raise CommandArgumentError(dals("""
                At least one -c / --channel flag must be supplied when using --override-channels.
                """))
            else:
                # override: use only CLI channels (plus 'local' when requested)
                return tuple(IndexedSet(concatv(local_add, self._argparse_args['channel'])))
        # add 'defaults' channel when necessary if --channel is given via the command line
        if self._argparse_args and 'channel' in self._argparse_args:
            # TODO: it's args.channel right now, not channels
            argparse_channels = tuple(self._argparse_args['channel'] or ())
            if argparse_channels and argparse_channels == self._channels:
                return tuple(IndexedSet(concatv(local_add, argparse_channels,
                                                (DEFAULTS_CHANNEL_NAME,))))
        return tuple(IndexedSet(concatv(local_add, self._channels)))
    @property
    def use_only_tar_bz2(self):
        # True when .conda packages must be avoided: forced by old conda-build,
        # a libarchive-less conda-package-handling, explicit config, or CLI flag.
        from ..models.version import VersionOrder
        # we avoid importing this at the top to avoid PATH issues. Ensure that this
        # is only called when use_only_tar_bz2 is first called.
        import conda_package_handling.api
        use_only_tar_bz2 = False
        if self._use_only_tar_bz2 is None:
            try:
                import conda_build
                # conda-build older than 3.18.3 cannot handle .conda packages
                use_only_tar_bz2 = VersionOrder(conda_build.__version__) < VersionOrder("3.18.3")
            except ImportError:
                pass
        if self._argparse_args and 'use_only_tar_bz2' in self._argparse_args:
            # NOTE(review): assumes the CLI value is a bool here; `&=` with a
            # None value would raise TypeError -- confirm against the argparser.
            use_only_tar_bz2 &= self._argparse_args['use_only_tar_bz2']
        return ((hasattr(conda_package_handling.api, 'libarchive_enabled') and
                 not conda_package_handling.api.libarchive_enabled) or
                self._use_only_tar_bz2 or
                use_only_tar_bz2)
    @property
    def binstar_upload(self):
        # backward compatibility for conda-build
        return self.anaconda_upload
    @property
    def verbosity(self):
        # debug mode forces verbosity level 2
        return 2 if self.debug else self._verbosity
@memoizedproperty
def user_agent(self):
builder = ["conda/%s requests/%s" % (CONDA_VERSION, self.requests_version)]
builder.append("%s/%s" % self.python_implementation_name_version)
builder.append("%s/%s" % self.platform_system_release)
builder.append("%s/%s" % self.os_distribution_name_version)
if self.libc_family_version[0]:
builder.append("%s/%s" % self.libc_family_version)
return " ".join(builder)
@memoizedproperty
def requests_version(self):
try:
from requests import __version__ as REQUESTS_VERSION
except ImportError: # pragma: no cover
try:
from pip._vendor.requests import __version__ as REQUESTS_VERSION
except ImportError:
REQUESTS_VERSION = "unknown"
return REQUESTS_VERSION
    @memoizedproperty
    def python_implementation_name_version(self):
        # CPython, Jython
        # '2.7.14'
        return platform.python_implementation(), platform.python_version()
    @memoizedproperty
    def platform_system_release(self):
        # tuple of system name and release version
        #
        # `uname -s` Linux, Windows, Darwin, Java
        #
        # `uname -r`
        # '17.4.0' for macOS
        # '10' or 'NT' for Windows
        return platform.system(), platform.release()
@memoizedproperty
def os_distribution_name_version(self):
# tuple of os distribution name and version
# e.g.
# 'debian', '9'
# 'OSX', '10.13.6'
# 'Windows', '10.0.17134'
platform_name = self.platform_system_release[0]
if platform_name == 'Linux':
from .._vendor.distro import id, version
try:
distinfo = id(), version(best=True)
except Exception as e:
log.debug('%r', e, exc_info=True)
distinfo = ('Linux', 'unknown')
distribution_name, distribution_version = distinfo[0], distinfo[1]
elif platform_name == 'Darwin':
distribution_name = 'OSX'
distribution_version = platform.mac_ver()[0]
else:
distribution_name = platform.system()
distribution_version = platform.version()
return distribution_name, distribution_version
    @memoizedproperty
    def libc_family_version(self):
        # tuple of lic_family and libc_version
        # None, None if not on Linux
        libc_family, libc_version = linux_get_libc_version()
        return libc_family, libc_version
    @memoizedproperty
    def cpu_flags(self):
        # DANGER: This is rather slow
        info = _get_cpu_info()
        return info['flags']
    @memoizedproperty
    @env_override('CONDA_OVERRIDE_CUDA', convert_empty_to_none=True)
    def cuda_version(self):
        # detected CUDA version; overridable via CONDA_OVERRIDE_CUDA
        from conda.common.cuda import cuda_detect
        return cuda_detect()
    @property
    def category_map(self):
        # Mapping of display-category name -> tuple of parameter names, used to
        # group parameters when rendering configuration descriptions.
        return odict((
            ('Channel Configuration', (
                'channels',
                'channel_alias',
                'default_channels',
                'override_channels_enabled',
                'whitelist_channels',
                'custom_channels',
                'custom_multichannels',
                'migrated_channel_aliases',
                'migrated_custom_channels',
                'add_anaconda_token',
                'allow_non_channel_urls',
                'restore_free_channel',
                'repodata_fns',
                'use_only_tar_bz2',
                'repodata_threads',
            )),
            ('Basic Conda Configuration', (  # TODO: Is there a better category name here?
                'envs_dirs',
                'pkgs_dirs',
                'default_threads',
            )),
            ('Network Configuration', (
                'client_ssl_cert',
                'client_ssl_cert_key',
                'local_repodata_ttl',
                'offline',
                'proxy_servers',
                'remote_connect_timeout_secs',
                'remote_max_retries',
                'remote_backoff_factor',
                'remote_read_timeout_secs',
                'ssl_verify',
            )),
            ('Solver Configuration', (
                'aggressive_update_packages',
                'auto_update_conda',
                'channel_priority',
                'create_default_packages',
                'disallowed_packages',
                'force_reinstall',
                'pinned_packages',
                'pip_interop_enabled',
                'track_features',
            )),
            ('Package Linking and Install-time Configuration', (
                'allow_softlinks',
                'always_copy',
                'always_softlink',
                'path_conflict',
                'rollback_enabled',
                'safety_checks',
                'extra_safety_checks',
                'shortcuts',
                'non_admin_enabled',
                'separate_format_cache',
                'verify_threads',
                'execute_threads',
            )),
            ('Conda-build Configuration', (
                'bld_path',
                'croot',
                'anaconda_upload',
                'conda_build',
            )),
            ('Output, Prompt, and Flow Control Configuration', (
                'always_yes',
                'auto_activate_base',
                'auto_stack',
                'changeps1',
                'env_prompt',
                'json',
                'notify_outdated_conda',
                'quiet',
                'report_errors',
                'show_channel_urls',
                'verbosity',
                'unsatisfiable_hints'
            )),
            ('CLI-only', (
                'deps_modifier',
                'update_modifier',
                'force',
                'force_remove',
                'clobber',
                'dry_run',
                'download_only',
                'ignore_pinned',
                'use_index_cache',
                'use_local',
            )),
            ('Hidden and Undocumented', (
                'allow_cycles',  # allow cyclical dependencies, or raise
                'allow_conda_downgrades',
                'add_pip_as_python_dependency',
                'debug',
                'dev',
                'default_python',
                'enable_private_envs',
                'error_upload_url',  # should remain undocumented
                'force_32bit',
                'root_prefix',
                'sat_solver',
                'solver_ignore_timestamps',
                'subdir',
                'subdirs',
                # https://conda.io/docs/config.html#disable-updating-of-dependencies-update-dependencies # NOQA
                # I don't think this documentation is correct any longer. # NOQA
                'target_prefix_override',
                # used to override prefix rewriting, for e.g. building docker containers or RPMs  # NOQA
            )),
        ))
    def get_descriptions(self):
        # mapping of parameter name -> human-readable description text
        return self.description_map
@memoizedproperty
def description_map(self):
return frozendict({
'add_anaconda_token': dals("""
In conjunction with the anaconda command-line client (installed with
`conda install anaconda-client`), and following logging into an Anaconda
Server API site using `anaconda login`, automatically apply a matching
private token to enable access to private packages and channels.
"""),
# 'add_pip_as_python_dependency': dals("""
# Add pip, wheel and setuptools as dependencies of python. This ensures pip,
# wheel and setuptools will always be installed any time python is installed.
# """),
'aggressive_update_packages': dals("""
A list of packages that, if installed, are always updated to the latest possible
version.
"""),
'allow_non_channel_urls': dals("""
Warn, but do not fail, when conda detects a channel url is not a valid channel.
"""),
'allow_softlinks': dals("""
When allow_softlinks is True, conda uses hard-links when possible, and soft-links
(symlinks) when hard-links are not possible, such as when installing on a
different filesystem than the one that the package cache is on. When
allow_softlinks is False, conda still uses hard-links when possible, but when it
is not possible, conda copies files. Individual packages can override
this setting, specifying that certain files should never be soft-linked (see the
no_link option in the build recipe documentation).
"""),
'always_copy': dals("""
Register a preference that files be copied into a prefix during install rather
than hard-linked.
"""),
'always_softlink': dals("""
Register a preference that files be soft-linked (symlinked) into a prefix during
install rather than hard-linked. The link source is the 'pkgs_dir' package cache
from where the package is being linked. WARNING: Using this option can result in
corruption of long-lived conda environments. Package caches are *caches*, which
means there is some churn and invalidation. With this option, the contents of
environments can be switched out (or erased) via operations on other environments.
"""),
'always_yes': dals("""
Automatically choose the 'yes' option whenever asked to proceed with a conda
operation, such as when running `conda install`.
"""),
'anaconda_upload': dals("""
Automatically upload packages built with conda build to anaconda.org.
"""),
'auto_activate_base': dals("""
Automatically activate the base environment during shell initialization.
"""),
'auto_update_conda': dals("""
Automatically update conda when a newer or higher priority version is detected.
"""),
'auto_stack': dals("""
Implicitly use --stack when using activate if current level of nesting
(as indicated by CONDA_SHLVL environment variable) is less than or equal to
specified value. 0 or false disables automatic stacking, 1 or true enables
it for one level.
"""),
'bld_path': dals("""
The location where conda-build will put built packages. Same as 'croot', but
'croot' takes precedence when both are defined. Also used in construction of the
'local' multichannel.
"""),
'changeps1': dals("""
When using activate, change the command prompt ($PS1) to include the
activated environment.
"""),
'channel_alias': dals("""
The prepended url location to associate with channel names.
"""),
'channel_priority': dals("""
Accepts values of 'strict', 'flexible', and 'disabled'. The default value
is 'flexible'. With strict channel priority, packages in lower priority channels
are not considered if a package with the same name appears in a higher
priority channel. With flexible channel priority, the solver may reach into
lower priority channels to fulfill dependencies, rather than raising an
unsatisfiable error. With channel priority disabled, package version takes
precedence, and the configured priority of channels is used only to break ties.
In previous versions of conda, this parameter was configured as either True or
False. True is now an alias to 'flexible'.
"""),
'channels': dals("""
The list of conda channels to include for relevant operations.
"""),
'client_ssl_cert': dals("""
A path to a single file containing a private key and certificate (e.g. .pem
file). Alternately, use client_ssl_cert_key in conjuction with client_ssl_cert
for individual files.
"""),
'client_ssl_cert_key': dals("""
Used in conjunction with client_ssl_cert for a matching key file.
"""),
# 'clobber': dals("""
# Allow clobbering of overlapping file paths within packages, and suppress
# related warnings. Overrides the path_conflict configuration value when
# set to 'warn' or 'prevent'.
# """),
'conda_build': dals("""
General configuration parameters for conda-build.
"""),
# TODO: add shortened link to docs for conda_build at See https://conda.io/docs/user-guide/configuration/use-condarc.html#conda-build-configuration # NOQA
'create_default_packages': dals("""
Packages that are by default added to a newly created environments.
"""), # TODO: This is a bad parameter name. Consider an alternate.
'croot': dals("""
The location where conda-build will put built packages. Same as 'bld_path', but
'croot' takes precedence when both are defined. Also used in construction of the
'local' multichannel.
"""),
'custom_channels': dals("""
A map of key-value pairs where the key is a channel name and the value is
a channel location. Channels defined here override the default
'channel_alias' value. The channel name (key) is not included in the channel
location (value). For example, to override the location of the 'conda-forge'
channel where the url to repodata is
https://anaconda-repo.dev/packages/conda-forge/linux-64/repodata.json, add an
entry 'conda-forge: https://anaconda-repo.dev/packages'.
"""),
'custom_multichannels': dals("""
A multichannel is a metachannel composed of multiple channels. The two reserved
multichannels are 'defaults' and 'local'. The 'defaults' multichannel is
customized using the 'default_channels' parameter. The 'local'
multichannel is a list of file:// channel locations where conda-build stashes
successfully-built packages. Other multichannels can be defined with
custom_multichannels, where the key is the multichannel name and the value is
a list of channel names and/or channel urls.
"""),
'default_channels': dals("""
The list of channel names and/or urls used for the 'defaults' multichannel.
"""),
# 'default_python': dals("""
# specifies the default major & minor version of Python to be used when
# building packages with conda-build. Also used to determine the major
# version of Python (2/3) to be used in new environments. Defaults to
# the version used by conda itself.
# """),
'default_threads': dals("""
Threads to use by default for parallel operations. Default is None,
which allows operations to choose themselves. For more specific
control, see the other *_threads parameters:
* repodata_threads - for fetching/loading repodata
* verify_threads - for verifying package contents in transactions
* execute_threads - for carrying out the unlinking and linking steps
"""),
'disallowed_packages': dals("""
Package specifications to disallow installing. The default is to allow
all packages.
"""),
'download_only': dals("""
Solve an environment and ensure package caches are populated, but exit
prior to unlinking and linking packages into the prefix
"""),
'envs_dirs': dals("""
The list of directories to search for named environments. When creating a new
named environment, the environment will be placed in the first writable
location.
"""),
'env_prompt': dals("""
Template for prompt modification based on the active environment. Currently
supported template variables are '{prefix}', '{name}', and '{default_env}'.
'{prefix}' is the absolute path to the active environment. '{name}' is the
basename of the active environment prefix. '{default_env}' holds the value
of '{name}' if the active environment is a conda named environment ('-n'
flag), or otherwise holds the value of '{prefix}'. Templating uses python's
str.format() method.
"""),
'execute_threads': dals("""
Threads to use when performing the unlink/link transaction. When not set,
defaults to 1. This step is pretty strongly I/O limited, and you may not
see much benefit here.
"""),
'force_reinstall': dals("""
Ensure that any user-requested package for the current operation is uninstalled
and reinstalled, even if that package already exists in the environment.
"""),
# 'force': dals("""
# Override any of conda's objections and safeguards for installing packages and
# potentially breaking environments. Also re-installs the package, even if the
# package is already installed. Implies --no-deps.
# """),
# 'force_32bit': dals("""
# CONDA_FORCE_32BIT should only be used when running conda-build (in order
# to build 32-bit packages on a 64-bit system). We don't want to mention it
# in the documentation, because it can mess up a lot of things.
# """),
'json': dals("""
Ensure all output written to stdout is structured json.
"""),
'local_repodata_ttl': dals("""
For a value of False or 0, always fetch remote repodata (HTTP 304 responses
respected). For a value of True or 1, respect the HTTP Cache-Control max-age
header. Any other positive integer values is the number of seconds to locally
cache repodata before checking the remote server for an update.
"""),
'migrated_channel_aliases': dals("""
A list of previously-used channel_alias values. Useful when switching between
different Anaconda Repository instances.
"""),
'migrated_custom_channels': dals("""
A map of key-value pairs where the key is a channel name and the value is
the previous location of the channel.
"""),
# 'no_deps': dals("""
# Do not install, update, remove, or change dependencies. This WILL lead to broken
# environments and inconsistent behavior. Use at your own risk.
# """),
'non_admin_enabled': dals("""
Allows completion of conda's create, install, update, and remove operations, for
non-privileged (non-root or non-administrator) users.
"""),
'notify_outdated_conda': dals("""
Notify if a newer version of conda is detected during a create, install, update,
or remove operation.
"""),
'offline': dals("""
Restrict conda to cached download content and file:// based urls.
"""),
'override_channels_enabled': dals("""
Permit use of the --overide-channels command-line flag.
"""),
'path_conflict': dals("""
The method by which conda handle's conflicting/overlapping paths during a
create, install, or update operation. The value must be one of 'clobber',
'warn', or 'prevent'. The '--clobber' command-line flag or clobber
configuration parameter overrides path_conflict set to 'prevent'.
"""),
'pinned_packages': dals("""
A list of package specs to pin for every environment resolution.
This parameter is in BETA, and its behavior may change in a future release.
"""),
'pip_interop_enabled': dals("""
Allow the conda solver to interact with non-conda-installed python packages.
"""),
'pkgs_dirs': dals("""
The list of directories where locally-available packages are linked from at
install time. Packages not locally available are downloaded and extracted
into the first writable directory.
"""),
'proxy_servers': dals("""
A mapping to enable proxy settings. Keys can be either (1) a scheme://hostname
form, which will match any request to the given scheme and exact hostname, or
(2) just a scheme, which will match requests to that scheme. Values are are
the actual proxy server, and are of the form
'scheme://[user:password@]host[:port]'. The optional 'user:password' inclusion
enables HTTP Basic Auth with your proxy.
"""),
'quiet': dals("""
Disable progress bar display and other output.
"""),
'remote_connect_timeout_secs': dals("""
The number seconds conda will wait for your client to establish a connection
to a remote url resource.
"""),
'remote_max_retries': dals("""
The maximum number of retries each HTTP connection should attempt.
"""),
'remote_backoff_factor': dals("""
The factor determines the time HTTP connection should wait for attempt.
"""),
'remote_read_timeout_secs': dals("""
Once conda has connected to a remote resource and sent an HTTP request, the
read timeout is the number of seconds conda will wait for the server to send
a response.
"""),
'repodata_threads': dals("""
Threads to use when downloading and reading repodata. When not set,
defaults to None, which uses the default ThreadPoolExecutor behavior.
"""),
'report_errors': dals("""
Opt in, or opt out, of automatic error reporting to core maintainers. Error
reports are anonymous, with only the error stack trace and information given
by `conda info` being sent.
"""),
'restore_free_channel': dals(""""
Add the "free" channel back into defaults, behind "main" in priority. The "free"
channel was removed from the collection of default channels in conda 4.7.0.
"""),
'rollback_enabled': dals("""
Should any error occur during an unlink/link transaction, revert any disk
mutations made to that point in the transaction.
"""),
'safety_checks': dals("""
Enforce available safety guarantees during package installation.
The value must be one of 'enabled', 'warn', or 'disabled'.
"""),
'separate_format_cache': dals("""
Treat .tar.bz2 files as different from .conda packages when
filenames are otherwise similar. This defaults to False, so
that your package cache doesn't churn when rolling out the new
package format. If you'd rather not assume that a .tar.bz2 and
.conda from the same place represent the same content, set this
to True.
"""),
'extra_safety_checks': dals("""
Spend extra time validating package contents. Currently, runs sha256 verification
on every file within each package during installation.
"""),
'shortcuts': dals("""
Allow packages to create OS-specific shortcuts (e.g. in the Windows Start
Menu) at install time.
"""),
'show_channel_urls': dals("""
Show channel URLs when displaying what is going to be downloaded.
"""),
'ssl_verify': dals("""
Conda verifies SSL certificates for HTTPS requests, just like a web
browser. By default, SSL verification is enabled, and conda operations will
fail if a required url's certificate cannot be verified. Setting ssl_verify to
False disables certification verification. The value for ssl_verify can also
be (1) a path to a CA bundle file, or (2) a path to a directory containing
certificates of trusted CA.
"""),
'track_features': dals("""
A list of features that are tracked by default. An entry here is similar to
adding an entry to the create_default_packages list.
"""),
'repodata_fns': dals("""
Specify filenames for repodata fetching. The default is ('current_repodata.json',
'repodata.json'), which tries a subset of the full index containing only the
latest version for each package, then falls back to repodata.json. You may
want to specify something else to use an alternate index that has been reduced
somehow.
"""),
'use_index_cache': dals("""
Use cache of channel index files, even if it has expired.
"""),
'use_only_tar_bz2': dals("""
A boolean indicating that only .tar.bz2 conda packages should be downloaded.
This is forced to True if conda-build is installed and older than 3.18.3,
because older versions of conda break when conda feeds it the new file format.
"""),
'verbosity': dals("""
Sets output log level. 0 is warn. 1 is info. 2 is debug. 3 is trace.
"""),
'verify_threads': dals("""
Threads to use when performing the transaction verification step. When not set,
defaults to 1.
"""),
'whitelist_channels': dals("""
The exclusive list of channels allowed to be used on the system. Use of any
other channels will result in an error. If conda-build channels are to be
allowed, along with the --use-local command line flag, be sure to include the
'local' channel in the list. If the list is empty or left undefined, no
channel exclusions will be enforced.
"""),
'unsatisfiable_hints': dals("""
A boolean to determine if conda should find conflicting packages in the case
of a failed install.
"""),
})
def conda_in_private_env():
    """Return True when conda runs from its own private env, i.e. an
    environment named '_conda_' that lives inside an 'envs' directory."""
    parent_dir, name = path_split(sys.prefix)
    return basename(parent_dir) == 'envs' and name == '_conda_'
def reset_context(search_path=SEARCH_PATH, argparse_args=None):
    """Re-initialize the global `context` singleton in place and return it."""
    global context
    context.__init__(search_path, argparse_args)
    # Drop the cached private conda-build sub-config so it gets rebuilt lazily.
    context.__dict__.pop('_Context__conda_build', None)
    # Imported here to avoid a circular dependency.
    from ..models.channel import Channel
    Channel._reset_state()
    return context
class ContextStackObject(object):
    """One saved (search_path, argparse_args) pair for the context stack."""

    def __init__(self, search_path=SEARCH_PATH, argparse_args=None):
        self.set_value(search_path, argparse_args)

    def set_value(self, search_path=SEARCH_PATH, argparse_args=None):
        """Record the values without applying them to the global context."""
        self.search_path, self.argparse_args = search_path, argparse_args

    def apply(self):
        """Rebuild the global context from the stored values (expensive)."""
        reset_context(self.search_path, self.argparse_args)
class ContextStack(object):
    """A growable stack of ContextStackObject frames.

    ``apply`` re-initializes the global context only when the top frame's
    values differ from the last-applied ones, since re-initialization is
    expensive.
    """

    def __init__(self):
        # Pre-allocate a few frames; the stack doubles on demand in push().
        self._stack = [ContextStackObject() for _ in range(3)]
        self._stack_idx = 0
        self._last_search_path = None
        self._last_argparse_args = None

    def push(self, search_path, argparse_args):
        """Push a new frame and apply it."""
        self._stack_idx += 1
        if self._stack_idx >= len(self._stack):
            # Double the capacity so pushes stay amortized O(1).
            self._stack += [ContextStackObject() for _ in range(len(self._stack))]
        self._stack[self._stack_idx].set_value(search_path, argparse_args)
        self.apply()

    def apply(self):
        """Re-apply the top frame, but only if it differs from the last one applied."""
        top = self._stack[self._stack_idx]
        if (self._last_search_path != top.search_path
                or self._last_argparse_args != top.argparse_args):
            # Expensive: reloads configuration from scratch.
            top.apply()
            self._last_search_path = top.search_path
            self._last_argparse_args = top.argparse_args

    def pop(self):
        """Discard the top frame and re-apply the one below it."""
        self._stack_idx -= 1
        self._stack[self._stack_idx].apply()

    def replace(self, search_path, argparse_args):
        """Overwrite the top frame in place and apply it unconditionally.

        NOTE: unlike apply(), this does not refresh the _last_* cache.
        """
        top = self._stack[self._stack_idx]
        top.set_value(search_path, argparse_args)
        top.apply()
# Process-wide stack of context configurations, driven by stack_context()/replace_context().
context_stack = ContextStack()
def stack_context(pushing, search_path=SEARCH_PATH, argparse_args=None):
    """Push (fast) or pop (slow) one frame on the global context stack.

    When `pushing` is truthy the given values are pushed; otherwise the top
    frame is popped and the previous one re-applied.
    """
    if pushing:
        context_stack.push(search_path, argparse_args)
    else:
        context_stack.pop()
# Default means "The configuration when there are no condarc files present". It is
# all the settings and defaults that are built in to the code and *not* the default
# value of search_path=SEARCH_PATH. It means search_path=().
def stack_context_default(pushing, argparse_args=None):
    """Push/pop a context that ignores every condarc file (search_path=())."""
    return stack_context(pushing, search_path=(), argparse_args=argparse_args)
def replace_context(pushing=None, search_path=SEARCH_PATH, argparse_args=None):
    """Overwrite the top context frame in place (cheaper than push+pop)."""
    # pushing arg intentionally not used here, but kept for API compatibility
    return context_stack.replace(search_path, argparse_args)
def replace_context_default(pushing=None, argparse_args=None):
    """Overwrite the top context frame with a condarc-free configuration."""
    # pushing arg intentionally not used here, but kept for API compatibility
    return context_stack.replace(search_path=(), argparse_args=argparse_args)
# Tests that want to only declare 'I support the project-wide default for how to
# manage stacking of contexts'. Tests that are known to be careful with context
# can use `replace_context_default` which might be faster, though it should
# be a stated goal to set conda_tests_ctxt_mgmt_def_pol to replace_context_default
# and not to stack_context_default.
conda_tests_ctxt_mgmt_def_pol = replace_context_default  # test-suite context-management policy hook
@memoize
def _get_cpu_info():
    """Return an immutable snapshot of CPU information (memoized after first call)."""
    # DANGER: This is rather slow
    from .._vendor.cpuinfo import get_cpu_info
    return frozendict(get_cpu_info())
def env_name(prefix):
    """Map a prefix path to its environment name (counterpart of
    locate_prefix_by_name below).

    Returns None for a falsy prefix, ROOT_ENV_NAME for the root prefix, the
    basename for a prefix inside one of context.envs_dirs, and the prefix
    itself otherwise.
    """
    if not prefix:
        return None
    if paths_equal(prefix, context.root_prefix):
        return ROOT_ENV_NAME
    parent_dir, name = path_split(prefix)
    if any(paths_equal(d, parent_dir) for d in context.envs_dirs):
        return name
    return prefix
def locate_prefix_by_name(name, envs_dirs=None):
    """Find the location of a prefix given a conda env name.  If the location
    does not exist, an EnvironmentNameNotFound error is raised.
    """
    assert name
    if name in (ROOT_ENV_NAME, 'root'):
        return context.root_prefix
    search_dirs = context.envs_dirs if envs_dirs is None else envs_dirs
    for envs_dir in search_dirs:
        if not isdir(envs_dir):
            continue
        candidate = join(envs_dir, name)
        if isdir(candidate):
            return abspath(candidate)

    from ..exceptions import EnvironmentNameNotFound
    raise EnvironmentNameNotFound(name)
def determine_target_prefix(ctx, args=None):
    """Get the prefix to operate in.  The prefix may not yet exist.

    Args:
        ctx: the context of conda
        args: the argparse args from the command line

    Returns: the prefix
    Raises: CondaEnvironmentNotFoundError if the prefix is invalid
    """
    argparse_args = args or ctx._argparse_args
    # Either option may be absent from the namespace entirely.
    prefix_name = getattr(argparse_args, 'name', None)
    prefix_path = getattr(argparse_args, 'prefix', None)

    if prefix_name is not None and not prefix_name.strip():  # pragma: no cover
        from ..exceptions import ArgumentError
        raise ArgumentError("Argument --name requires a value.")
    if prefix_path is not None and not prefix_path.strip():  # pragma: no cover
        from ..exceptions import ArgumentError
        raise ArgumentError("Argument --prefix requires a value.")

    if prefix_name is None and prefix_path is None:
        return ctx.default_prefix
    if prefix_path is not None:
        return expand(prefix_path)

    # Only a name was given: validate it, then resolve it to a path.
    disallowed_chars = ('/', ' ', ':', '#')
    if any(ch in prefix_name for ch in disallowed_chars):
        from ..exceptions import CondaValueError
        message = ("Invalid environment name: '" + prefix_name + "'\n"
                   " Characters not allowed: {}".format(disallowed_chars))
        raise CondaValueError(message)
    if prefix_name in (ROOT_ENV_NAME, 'root'):
        return ctx.root_prefix
    from ..exceptions import EnvironmentNameNotFound
    try:
        return locate_prefix_by_name(prefix_name)
    except EnvironmentNameNotFound:
        # Not found anywhere: place it in the first writable envs dir.
        return join(_first_writable_envs_dir(), prefix_name)
def _first_writable_envs_dir():
    """Return the first entry of context.envs_dirs that is writable.

    Calling this function will *create* an envs directory if one does not
    already exist.  Any caller should intend to *use* that directory for
    *writing*, not just reading.  Raises NoWritableEnvsDirError if none of
    the configured directories can be written to.
    """
    for envs_dir in context.envs_dirs:
        if envs_dir == os.devnull:
            continue

        # The magic file being used here could change in the future.  Don't write
        # programs outside this code base that rely on the presence of this file.
        # This value is duplicated in conda.gateways.disk.create.create_envs_directory().
        envs_dir_magic_file = join(envs_dir, '.conda_envs_dir_test')

        if not isfile(envs_dir_magic_file):
            # Directory not yet marked; try to create/initialize it.
            from ..gateways.disk.create import create_envs_directory
            if create_envs_directory(envs_dir):
                return envs_dir
            continue

        # Marker exists; prove writability by appending to it.
        try:
            open(envs_dir_magic_file, 'a').close()
            return envs_dir
        except (IOError, OSError):
            log.trace("Tried envs_dir but not writable: %s", envs_dir)

    from ..exceptions import NoWritableEnvsDirError
    raise NoWritableEnvsDirError(context.envs_dirs)
# backward compatibility for conda-build
def get_prefix(ctx, args, search=True):  # pragma: no cover
    # `search` is accepted but ignored; it exists only to preserve the legacy
    # call signature used by conda-build.
    return determine_target_prefix(ctx or context, args)
# Initialize the module-level `context` singleton at import time.
try:
    context = Context((), None)
except ConfigurationLoadError as e:  # pragma: no cover
    print(repr(e), file=sys.stderr)
    # Exception handler isn't loaded so use sys.exit
    sys.exit(1)
| []
| []
| [
"CONDA_SHLVL",
"CONDA_DEFAULT_ENV",
"CONDA_PREFIX",
"PYTHONPATH"
]
| [] | ["CONDA_SHLVL", "CONDA_DEFAULT_ENV", "CONDA_PREFIX", "PYTHONPATH"] | python | 4 | 0 | |
upgrade/upgrade.go | package upgrade
import (
"fmt"
"os"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/flexiant/kdeploy/fetchers"
"github.com/flexiant/kdeploy/template"
"github.com/flexiant/kdeploy/upgrade/strategies"
"github.com/flexiant/kdeploy/utils"
"github.com/flexiant/kdeploy/webservice"
"github.com/hashicorp/go-version"
)
// CmdUpgrade implements the 'upgrade' command: it fetches the kubeware named by
// KDEPLOY_KUBEWARE, verifies that an older version is currently deployed in
// KDEPLOY_NAMESPACE, and then upgrades it using the strategy selected by
// KDEPLOY_UPGRADE_STRATEGY.
func CmdUpgrade(c *cli.Context) {
	kubeware := os.Getenv("KDEPLOY_KUBEWARE")

	localKubePath, err := fetchers.Fetch(kubeware)
	if err != nil {
		log.Fatal(fmt.Errorf("Could not fetch kubeware: '%s' (%v)", kubeware, err))
	}

	log.Debugf("Going to parse kubeware in %s", localKubePath)
	// ParseMetadata does not return an error; the stale CheckError(err) that
	// used to follow it only re-checked the already-handled fetch error.
	md := template.ParseMetadata(localKubePath)

	kubernetes, err := webservice.NewKubeClient()
	utils.CheckError(err)

	namespace := os.Getenv("KDEPLOY_NAMESPACE")

	// Check if kubeware already installed, error if it's not
	v, err := kubernetes.FindDeployedKubewareVersion(namespace, md.Name)
	utils.CheckError(err)
	if v == "" {
		log.Fatalf("Kubeware '%s.%s' is not deployed and thus it can't be upgraded", namespace, md.Name)
	}
	log.Infof("Found version %s of %s.%s", v, namespace, md.Name)

	// Check if equal or newer version already exists, error if so.
	// (Fixed: the old `LessThan` test let an equal version through.)
	deployedVersion, err := version.NewVersion(v)
	utils.CheckError(err)
	upgradeVersion, err := version.NewVersion(md.Version)
	utils.CheckError(err)
	if !upgradeVersion.GreaterThan(deployedVersion) {
		log.Fatalf("Can not upgrade to version '%s' since version '%s' is already deployed", md.Version, v)
	}

	// build attributes merging "role list" to defaults
	log.Debugf("Building attributes")
	defaults, err := md.AttributeDefaults()
	utils.CheckError(err)
	attributes := template.BuildAttributes(c.String("attribute"), defaults)

	// get services and parse each one
	log.Debugf("Parsing services")
	servicesSpecs, err := md.ParseServices(attributes)
	utils.CheckError(err)

	// get replica controllers and parse each one
	log.Debugf("Parsing controllers")
	controllersSpecs, err := md.ParseControllers(attributes)
	utils.CheckError(err)

	upgStrategy := upgradeStrategies.BuildUpgradeStrategy(os.Getenv("KDEPLOY_UPGRADE_STRATEGY"), kubernetes)
	upgStrategy.Upgrade(namespace, servicesSpecs, controllersSpecs)

	log.Infof("Kubeware '%s.%s' has been upgraded from version '%s' to '%s'", namespace, md.Name, v, md.Version)
}
| [
"\"KDEPLOY_KUBEWARE\"",
"\"KDEPLOY_NAMESPACE\"",
"\"KDEPLOY_UPGRADE_STRATEGY\""
]
| []
| [
"KDEPLOY_NAMESPACE",
"KDEPLOY_UPGRADE_STRATEGY",
"KDEPLOY_KUBEWARE"
]
| [] | ["KDEPLOY_NAMESPACE", "KDEPLOY_UPGRADE_STRATEGY", "KDEPLOY_KUBEWARE"] | go | 3 | 0 | |
desktop/core/ext-py/Django-1.2.3/tests/runtests.py | #!/usr/bin/env python
import os, sys, traceback
import unittest
import django.contrib as contrib
# Dotted-path / directory-name constants used to locate the three groups of
# test apps: contrib apps, model tests, and regression tests.
CONTRIB_DIR_NAME = 'django.contrib'
MODEL_TESTS_DIR_NAME = 'modeltests'
REGRESSION_TESTS_DIR_NAME = 'regressiontests'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(contrib.__file__)
MODEL_TEST_DIR = os.path.join(os.path.dirname(__file__), MODEL_TESTS_DIR_NAME)
REGRESSION_TEST_DIR = os.path.join(os.path.dirname(__file__), REGRESSION_TESTS_DIR_NAME)
# Subdirectories of regressiontests/ that are not test apps.
REGRESSION_SUBDIRS_TO_SKIP = ['locale']
# Apps installed for every test run, regardless of which labels were requested.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.redirects',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.admin',
    'django.contrib.admindocs',
]
def get_test_models():
    """Return (location, app_name) pairs for every runnable test app found in
    the model-test, regression-test and contrib directories.  Entries starting
    with '__init__', '.', 'sql' or 'invalid', and skipped regression subdirs,
    are excluded."""
    models = []
    search_dirs = ((MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR),
                   (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR),
                   (CONTRIB_DIR_NAME, CONTRIB_DIR))
    for loc, dirpath in search_dirs:
        for f in os.listdir(dirpath):
            excluded = (f.startswith('__init__') or f.startswith('.')
                        or f.startswith('sql') or f.startswith('invalid')
                        or os.path.basename(f) in REGRESSION_SUBDIRS_TO_SKIP)
            if not excluded:
                models.append((loc, f))
    return models
def get_invalid_models():
    """Return (location, app_name) pairs for intentionally-invalid test apps,
    i.e. directory entries whose names start with 'invalid'."""
    models = []
    search_dirs = ((MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR),
                   (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR),
                   (CONTRIB_DIR_NAME, CONTRIB_DIR))
    for loc, dirpath in search_dirs:
        for f in os.listdir(dirpath):
            if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
                continue
            if f.startswith('invalid'):
                models.append((loc, f))
    return models
class InvalidModelTestCase(unittest.TestCase):
    """Load an intentionally-broken model app and assert that model validation
    reports exactly the errors declared in the app's ``model_errors`` string."""

    def __init__(self, model_label):
        unittest.TestCase.__init__(self)
        self.model_label = model_label  # dotted label of the invalid app to load

    def runTest(self):
        from django.core.management.validation import get_validation_errors
        from django.db.models.loading import load_app
        from cStringIO import StringIO
        try:
            module = load_app(self.model_label)
        except Exception, e:
            # The exception itself is discarded: any import failure of an
            # "invalid model" module is itself a test failure.
            self.fail('Unable to load invalid model module')
        # Make sure sys.stdout is not a tty so that we get errors without
        # coloring attached (makes matching the results easier). We restore
        # sys.stderr afterwards.
        orig_stdout = sys.stdout
        s = StringIO()
        sys.stdout = s
        count = get_validation_errors(s, module)
        sys.stdout = orig_stdout
        s.seek(0)
        error_log = s.read()
        # Compare actual vs. expected error lines in both directions.
        actual = error_log.split('\n')
        expected = module.model_errors.split('\n')
        unexpected = [err for err in actual if err not in expected]
        missing = [err for err in expected if err not in actual]
        self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
        self.assert_(not missing, "Missing Errors: " + '\n'.join(missing))
def django_tests(verbosity, interactive, failfast, test_labels):
from django.conf import settings
old_installed_apps = settings.INSTALLED_APPS
old_root_urlconf = getattr(settings, "ROOT_URLCONF", "")
old_template_dirs = settings.TEMPLATE_DIRS
old_use_i18n = settings.USE_I18N
old_login_url = settings.LOGIN_URL
old_language_code = settings.LANGUAGE_CODE
old_middleware_classes = settings.MIDDLEWARE_CLASSES
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), TEST_TEMPLATE_DIR),)
settings.USE_I18N = True
settings.LANGUAGE_CODE = 'en'
settings.LOGIN_URL = '/accounts/login/'
settings.MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware',
)
settings.SITE_ID = 1
# For testing comment-utils, we require the MANAGERS attribute
# to be set, so that a test email is sent out which we catch
# in our tests.
settings.MANAGERS = ("[email protected]",)
# Load all the ALWAYS_INSTALLED_APPS.
# (This import statement is intentionally delayed until after we
# access settings because of the USE_I18N dependency.)
from django.db.models.loading import get_apps, load_app
get_apps()
# Load all the test model apps.
test_labels_set = set([label.split('.')[0] for label in test_labels])
for model_dir, model_name in get_test_models():
model_label = '.'.join([model_dir, model_name])
# if the model was named on the command line, or
# no models were named (i.e., run all), import
# this model and add it to the list to test.
if not test_labels or model_name in test_labels_set:
if verbosity >= 1:
print "Importing model %s" % model_name
mod = load_app(model_label)
if mod:
if model_label not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(model_label)
# Add tests for invalid models.
extra_tests = []
for model_dir, model_name in get_invalid_models():
model_label = '.'.join([model_dir, model_name])
if not test_labels or model_name in test_labels:
extra_tests.append(InvalidModelTestCase(model_label))
try:
# Invalid models are not working apps, so we cannot pass them into
# the test runner with the other test_labels
test_labels.remove(model_name)
except ValueError:
pass
# Run the test suite, including the extra validation tests.
from django.test.utils import get_runner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
TestRunner = get_runner(settings)
if hasattr(TestRunner, 'func_name'):
# Pre 1.2 test runners were just functions,
# and did not support the 'failfast' option.
import warnings
warnings.warn(
'Function-based test runners are deprecated. Test runners should be classes with a run_tests() method.',
PendingDeprecationWarning
)
failures = TestRunner(test_labels, verbosity=verbosity, interactive=interactive,
extra_tests=extra_tests)
else:
test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
failures = test_runner.run_tests(test_labels, extra_tests=extra_tests)
if failures:
sys.exit(bool(failures))
# Restore the old settings.
settings.INSTALLED_APPS = old_installed_apps
settings.ROOT_URLCONF = old_root_urlconf
settings.TEMPLATE_DIRS = old_template_dirs
settings.USE_I18N = old_use_i18n
settings.LANGUAGE_CODE = old_language_code
settings.LOGIN_URL = old_login_url
settings.MIDDLEWARE_CLASSES = old_middleware_classes
if __name__ == "__main__":
    # Command-line entry point: parse options, ensure a settings module is
    # configured, then delegate to django_tests().
    from optparse import OptionParser
    usage = "%prog [options] [model model model ...]"
    parser = OptionParser(usage=usage)
    parser.add_option('-v','--verbosity', action='store', dest='verbosity', default='0',
        type='choice', choices=['0', '1', '2'],
        help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
    parser.add_option('--noinput', action='store_false', dest='interactive', default=True,
        help='Tells Django to NOT prompt the user for input of any kind.')
    parser.add_option('--failfast', action='store_true', dest='failfast', default=False,
        help='Tells Django to stop running the test suite after first failed test.')
    parser.add_option('--settings',
        help='Python path to settings module, e.g. "myproject.settings". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
    options, args = parser.parse_args()
    if options.settings:
        # Explicit --settings wins over the environment.
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    elif "DJANGO_SETTINGS_MODULE" not in os.environ:
        parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. "
                      "Set it or use --settings.")
    django_tests(int(options.verbosity), options.interactive, options.failfast, args)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
config/settings/common.py | # -*- coding: utf-8 -*-
"""
Django settings for aago-ranking project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('aago_ranking')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'aago_ranking.users', # custom users app
'aago_ranking.games',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'aago_ranking.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Ignacio Rossi""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres:///aago_ranking'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'aago_ranking.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'aago_ranking.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('aago_ranking.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
| []
| []
| []
| [] | [] | python | 0 | 0 | |
gunicorn/arbiter.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import errno
import os
import select
import signal
import sys
import time
import traceback
from gunicorn.errors import HaltServer
from gunicorn.pidfile import Pidfile
from gunicorn.sock import create_socket
from gunicorn import util
from gunicorn import __version__, SERVER_SOFTWARE
class Arbiter(object):
"""
Arbiter maintain the workers processes alive. It launches or
kills them if needed. It also manages application reloading
via SIGHUP/USR2.
"""
# A flag indicating if a worker failed to
# to boot. If a worker process exist with
# this error code, the arbiter will terminate.
WORKER_BOOT_ERROR = 3
START_CTX = {}
LISTENER = None
WORKERS = {}
PIPE = []
# I love dynamic languages
SIG_QUEUE = []
SIGNALS = map(
lambda x: getattr(signal, "SIG%s" % x),
"HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()
)
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
def __init__(self, app):
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
self.setup(app)
self.pidfile = None
self.worker_age = 0
self.reexec_pid = 0
self.master_name = "Master"
# get current path, try to use PWD env first
try:
a = os.stat(os.environ['PWD'])
b = os.stat(os.getcwd())
if a.ino == b.ino and a.dev == b.dev:
cwd = os.environ['PWD']
else:
cwd = os.getcwd()
except:
cwd = os.getcwd()
args = sys.argv[:]
args.insert(0, sys.executable)
# init start context
self.START_CTX = {
"args": args,
"cwd": cwd,
0: sys.executable
}
def setup(self, app):
self.app = app
self.cfg = app.cfg
self.log = self.cfg.logger_class(app.cfg)
# reopen files
self.log.reopen_files()
self.address = self.cfg.address
self.num_workers = self.cfg.workers
self.debug = self.cfg.debug
self.timeout = self.cfg.timeout
self.proc_name = self.cfg.proc_name
self.worker_class = self.cfg.worker_class
if self.cfg.debug:
self.log.debug("Current configuration:")
for config, value in sorted(self.cfg.settings.iteritems()):
self.log.debug(" %s: %s", config, value.value)
if self.cfg.preload_app:
if not self.cfg.debug:
self.app.wsgi()
else:
self.log.warning("debug mode: app isn't preloaded.")
def start(self):
"""\
Initialize the arbiter. Start listening and set pidfile if needed.
"""
self.log.info("Starting gunicorn %s", __version__)
self.cfg.on_starting(self)
self.pid = os.getpid()
self.init_signals()
if not self.LISTENER:
self.LISTENER = create_socket(self.cfg, self.log)
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
self.log.debug("Arbiter booted")
self.log.info("Listening at: %s (%s)", self.LISTENER,
self.pid)
self.log.info("Using worker: %s",
self.cfg.settings['worker_class'].get())
self.cfg.when_ready(self)
def init_signals(self):
"""\
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
if self.PIPE:
map(os.close, self.PIPE)
self.PIPE = pair = os.pipe()
map(util.set_non_blocking, pair)
map(util.close_on_exec, pair)
self.log.close_on_exec()
map(lambda s: signal.signal(s, self.signal), self.SIGNALS)
signal.signal(signal.SIGCHLD, self.handle_chld)
def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
def run(self):
"Main master loop."
self.start()
util._setproctitle("master [%s]" % self.proc_name)
self.manage_workers()
while True:
try:
self.reap_workers()
sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
if sig is None:
self.sleep()
self.murder_workers()
self.manage_workers()
continue
if sig not in self.SIG_NAMES:
self.log.info("Ignoring unknown signal: %s", sig)
continue
signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.log.error("Unhandled signal: %s", signame)
continue
self.log.info("Handling signal: %s", signame)
handler()
self.wakeup()
except StopIteration:
self.halt()
except KeyboardInterrupt:
self.halt()
except HaltServer, inst:
self.halt(reason=inst.reason, exit_status=inst.exit_status)
except SystemExit:
raise
except Exception:
self.log.info("Unhandled exception in main loop:\n%s",
traceback.format_exc())
self.stop(False)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(-1)
def handle_chld(self, sig, frame):
"SIGCHLD handling"
self.wakeup()
def handle_hup(self):
"""\
HUP handling.
- Reload configuration
- Start the new worker processes with a new configuration
- Gracefully shutdown the old worker processes
"""
self.log.info("Hang up: %s", self.master_name)
self.reload()
def handle_quit(self):
"SIGQUIT handling"
raise StopIteration
def handle_int(self):
"SIGINT handling"
self.stop(False)
raise StopIteration
def handle_term(self):
"SIGTERM handling"
self.stop(False)
raise StopIteration
def handle_ttin(self):
"""\
SIGTTIN handling.
Increases the number of workers by one.
"""
self.num_workers += 1
self.manage_workers()
def handle_ttou(self):
"""\
SIGTTOU handling.
Decreases the number of workers by one.
"""
if self.num_workers <= 1:
return
self.num_workers -= 1
self.manage_workers()
def handle_usr1(self):
"""\
SIGUSR1 handling.
Kill all workers by sending them a SIGUSR1
"""
self.kill_workers(signal.SIGUSR1)
self.log.reopen_files()
def handle_usr2(self):
"""\
SIGUSR2 handling.
Creates a new master/worker set as a slave of the current
master without affecting old workers. Use this to do live
deployment with the ability to backout a change.
"""
self.reexec()
def handle_winch(self):
"SIGWINCH handling"
if os.getppid() == 1 or os.getpgrp() != os.getpid():
self.log.info("graceful stop of workers")
self.num_workers = 0
self.kill_workers(signal.SIGQUIT)
else:
self.log.info("SIGWINCH ignored. Not daemonized")
def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def halt(self, reason=None, exit_status=0):
""" halt arbiter """
self.stop()
self.log.info("Shutting down: %s", self.master_name)
if reason is not None:
self.log.info("Reason: %s", reason)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(exit_status)
def sleep(self):
"""\
Sleep until PIPE is readable or we timeout.
A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except select.error, e:
if e[0] not in [errno.EAGAIN, errno.EINTR]:
raise
except OSError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()
def stop(self, graceful=True):
"""\
Stop workers
:attr graceful: boolean, If True (the default) workers will be
killed gracefully (ie. trying to wait for the current connection)
"""
try:
self.LISTENER.close()
except Exception:
pass
self.LISTENER = None
sig = signal.SIGQUIT
if not graceful:
sig = signal.SIGTERM
limit = time.time() + self.timeout
while self.WORKERS and time.time() < limit:
self.kill_workers(sig)
time.sleep(0.1)
self.reap_workers()
self.kill_workers(signal.SIGKILL)
def reexec(self):
"""\
Relaunch the master and workers.
"""
if self.pidfile is not None:
self.pidfile.rename("%s.oldbin" % self.pidfile.fname)
self.reexec_pid = os.fork()
if self.reexec_pid != 0:
self.master_name = "Old Master"
return
os.environ['GUNICORN_FD'] = str(self.LISTENER.fileno())
os.chdir(self.START_CTX['cwd'])
self.cfg.pre_exec(self)
util.closerange(3, self.LISTENER.fileno())
util.closerange(self.LISTENER.fileno()+1, util.get_maxfd())
os.execvpe(self.START_CTX[0], self.START_CTX['args'], os.environ)
def reload(self):
old_address = self.cfg.address
# reload conf
self.app.reload()
self.setup(self.app)
# do we need to change listener ?
if old_address != self.cfg.address:
self.LISTENER.close()
self.LISTENER = create_socket(self.cfg, self.log)
self.log.info("Listening at: %s", self.LISTENER)
# spawn new workers with new app & conf
self.cfg.on_reload(self)
# unlink pidfile
if self.pidfile is not None:
self.pidfile.unlink()
# create new pidfile
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
# set new proc_name
util._setproctitle("master [%s]" % self.proc_name)
# manage workers
self.log.reopen_files()
self.manage_workers()
def murder_workers(self):
"""\
Kill unused/idle workers
"""
for (pid, worker) in self.WORKERS.items():
try:
if time.time() - worker.tmp.last_update() <= self.timeout:
continue
except ValueError:
continue
self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
self.kill_worker(pid, signal.SIGKILL)
def reap_workers(self):
"""\
Reap workers to avoid zombie processes
"""
try:
while True:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
# A worker said it cannot boot. We'll shutdown
# to avoid infinite start/stop cycles.
exitcode = status >> 8
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
worker.tmp.close()
except OSError, e:
if e.errno == errno.ECHILD:
pass
def manage_workers(self):
"""\
Maintain the number of workers by spawning or killing
as required.
"""
if len(self.WORKERS.keys()) < self.num_workers:
self.spawn_workers()
workers = self.WORKERS.items()
workers.sort(key=lambda w: w[1].age)
while len(workers) > self.num_workers:
(pid, _) = workers.pop(0)
self.kill_worker(pid, signal.SIGQUIT)
def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENER,
self.app, self.timeout/2.0,
self.cfg, self.log)
self.cfg.pre_fork(self, worker)
pid = os.fork()
if pid != 0:
self.WORKERS[pid] = worker
return pid
# Process Child
worker_pid = os.getpid()
try:
util._setproctitle("worker [%s]" % self.proc_name)
self.log.info("Booting worker with pid: %s", worker_pid)
self.cfg.post_fork(self, worker)
worker.init_process()
sys.exit(0)
except SystemExit:
raise
except:
self.log.exception("Exception in worker process:")
if not worker.booted:
sys.exit(self.WORKER_BOOT_ERROR)
sys.exit(-1)
finally:
self.log.info("Worker exiting (pid: %s)", worker_pid)
try:
worker.tmp.close()
self.cfg.worker_exit(self, worker)
except:
pass
def spawn_workers(self):
"""\
Spawn new workers as needed.
This is where a worker process leaves the main loop
of the master process.
"""
for i in range(self.num_workers - len(self.WORKERS.keys())):
self.spawn_worker()
def kill_workers(self, sig):
"""\
Kill all workers with the signal `sig`
:attr sig: `signal.SIG*` value
"""
for pid in self.WORKERS.keys():
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig):
"""\
Kill a worker
:attr pid: int, worker pid
:attr sig: `signal.SIG*` value
"""
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
worker.tmp.close()
self.cfg.worker_exit(self, worker)
return
except (KeyError, OSError):
return
raise
| []
| []
| [
"GUNICORN_FD",
"PWD",
"SERVER_SOFTWARE"
]
| [] | ["GUNICORN_FD", "PWD", "SERVER_SOFTWARE"] | python | 3 | 0 | |
scripts/advanced_collectible/deploy_advanced.py | #!/usr/bin/python3
from brownie import AdvancedCollectible, accounts, network, config
from scripts.helpful_scripts import fund_advanced_collectible
def main():
print(config["wallets"]["from_key"])
dev = accounts.add(config["wallets"]["from_key"])
print(network.show_active())
# publish_source = True if os.getenv("ETHERSCAN_TOKEN") else False # Currently having an issue with this
publish_source = False
advanced_collectible = AdvancedCollectible.deploy(
config["networks"][network.show_active()]["vrf_coordinator"],
config["networks"][network.show_active()]["link_token"],
config["networks"][network.show_active()]["keyhash"],
{"from": dev},
publish_source=publish_source,
)
fund_advanced_collectible(advanced_collectible)
return advanced_collectible
| []
| []
| [
"ETHERSCAN_TOKEN"
]
| [] | ["ETHERSCAN_TOKEN"] | python | 1 | 0 | |
source/operators/comprehend/key_phrases/get_key_phrases.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
from botocore import config
import tarfile
import os
import json
from io import BytesIO
from MediaInsightsEngineLambdaHelper import MediaInsightsOperationHelper
from MediaInsightsEngineLambdaHelper import MasExecutionError
from MediaInsightsEngineLambdaHelper import DataPlane
mie_config = json.loads(os.environ['botoConfig'])
config = config.Config(**mie_config)
comprehend = boto3.client('comprehend', config=config)
s3_client = boto3.client('s3')
headers = {"Content-Type": "application/json"}
def read_from_s3(bucket, key):
try:
obj = s3_client.get_object(
Bucket=bucket,
Key=key
)
except Exception as e:
print("Exception occurred while reading asset metadata from s3")
return {"Status": "Error", "Message": e}
else:
results = obj['Body'].read()
return {"Status": "Success", "Object": results}
def lambda_handler(event, context):
print("We got this event:\n", event)
operator_object = MediaInsightsOperationHelper(event)
try:
job_id = operator_object.metadata["comprehend_phrases_job_id"]
asset_id = operator_object.asset_id
workflow_id = operator_object.workflow_execution_id
# If Comprehend wasn't run due to empty text input, then we're done
if job_id == "Empty input --> empty output.":
operator_object.update_workflow_status("Complete")
return operator_object.return_output_object()
except KeyError:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(comprehend_error="No valid job id")
raise MasExecutionError(operator_object.return_output_object())
try:
response = comprehend.list_key_phrases_detection_jobs(
Filter={
'JobName': job_id,
},
)
except Exception as e:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(comprehend_error="Unable to get response from comprehend: {e}".format(e=e))
raise MasExecutionError(operator_object.return_output_object())
else:
print(response)
comprehend_status = response["KeyPhrasesDetectionJobPropertiesList"][0]["JobStatus"]
if comprehend_status == "SUBMITTED" or comprehend_status == "IN_PROGRESS":
operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id)
operator_object.update_workflow_status("Executing")
return operator_object.return_output_object()
elif comprehend_status == "COMPLETED":
output_uri = response["KeyPhrasesDetectionJobPropertiesList"][0]["OutputDataConfig"]["S3Uri"]
delimeter = '/'
bucket = delimeter.join(output_uri.split(delimeter)[2:3])
file_name = output_uri.split(delimeter)[-1]
key = delimeter.join(output_uri.split(delimeter)[3:-1]) + '/' + file_name
comprehend_tarball = read_from_s3(bucket, key)
comprehend_data = {"LanguageCode": response['KeyPhrasesDetectionJobPropertiesList'][0]['LanguageCode'], "Results": []}
if comprehend_tarball["Status"] == "Success":
input_bytes = comprehend_tarball["Object"]
with tarfile.open(fileobj=BytesIO(input_bytes)) as tf:
for member in tf:
if member.isfile():
comprehend_data["Results"].append(tf.extractfile(member).read().decode('utf-8'))
dataplane = DataPlane()
metadata_upload = dataplane.store_asset_metadata(asset_id, "key_phrases", workflow_id, comprehend_data)
if "Status" not in metadata_upload:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(
comprehend_error="Unable to store key phrases data {e}".format(e=metadata_upload))
raise MasExecutionError(operator_object.return_output_object())
else:
if metadata_upload["Status"] == "Success":
operator_object.add_workflow_metadata(comprehend_entity_job_id=job_id, output_uri=output_uri)
operator_object.update_workflow_status("Complete")
return operator_object.return_output_object()
else:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(comprehend_error="Unable to store key phrases data {e}".format(e=metadata_upload))
raise MasExecutionError(operator_object.return_output_object())
else:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(comprehend_entity_job_id=job_id, comprehend_error="could not retrieve output from s3: {e}".format(e=comprehend_tarball["Message"]))
raise MasExecutionError(operator_object.return_output_object())
else:
operator_object.update_workflow_status("Error")
operator_object.add_workflow_metadata(comprehend_phrases_job_id=job_id, comprehend_error="comprehend returned as failed: {e}".format(e=response["KeyPhrasesDetectionJobPropertiesList"][0]["Message"]))
raise MasExecutionError(operator_object.return_output_object())
| []
| []
| [
"botoConfig"
]
| [] | ["botoConfig"] | python | 1 | 0 | |
weatherbot.py | # loads .env contents
import settings
# use for approximate string matching
import difflib
import pandas as pd
import os, time, sys
import re, json
from urllib.request import urlopen
from datetime import datetime as dt
from slackclient import SlackClient
keys = {
'weatherbot': os.environ['WEATHERBOT_API_KEY'],
'openweather': os.environ['OPENWEATHER_API_KEY']
}
client = SlackClient(keys['weatherbot'])
weatherbot_id = 'U93NEAZ24'
mention_regex = "<@{}>(.*)".format(weatherbot_id)
#MENTION_REGEX = "<.*>(.*)"
#.format(('.*')
base_url = 'https://api.openweathermap.org/data/2.5/weather'
# from the world cities database : https://simplemaps.com/data/world-cities
cities = pd.read_csv('cities.csv').city
# emojis assigned to each description for great fun
emojis = {
'broken clouds': 'sun_behind_cloud',
'clear sky': 'sun_with_face',
'few clouds': 'sun_small_cloud',
'haze': 'fog',
'mist': 'fog',
'light rain': 'partly_sunny_rain',
'light snow': 'snowflake',
'moderate rain': 'umbrella_with_rain_drops',
'overcast clouds': 'cloud',
'scattered clouds': 'sun_small_cloud'
}
def get_weather(city):
"""Gets the weather data for a given city"""
# build the url string
url = '{}?APPID={}&q={}'.format(base_url,
keys['openweather'],
city.replace(' ', '+'))
# http get it
try:
res = urlopen(url)
except:
return {'error': 'url not found'}
if res.code != 200:
return {'error': 'invalid request'}
try:
data = json.loads(res.read().decode('utf8'))
except:
return {'error': 'malformed data'}
return data
def extract_message(message):
"""Extracts message content from a mention"""
matches = re.search(mention_regex, message)
print(matches)
if not (matches == None):
return matches.group(1)
def parse_command(information_recieved):
"""Parses information from RTM and extracts command and parameters"""
for item in information_recieved:
if item['type'] == "message" and not "subtype" in item:
message = extract_message(item['text'])
user = item['user']
channel = item['channel']
return message, channel, user
return None,None,None
def handle_message(message, channel, user):
"""Main method to handle weather data queries"""
# get the current time
t = str(dt.now())[:19]
# display message details
log = """
Time {}
Message {}
Channel {}
User {}
""".format(t, message, channel, user)
sys.stderr.write(log)
# check the world cities dataset for cities
# whose names are approximately the given text
#
# example: new yrk --> New York
matches = difflib.get_close_matches(message, cities)
# if a city is found, grab the data for the first match
# from the openweather API
if len(matches):
city = matches[0]
data = get_weather(city)
if not 'error' in data:
# parse main fields
desc = data['weather'][0]['description']
temp = int(data['main']['temp']) - 273 # kelvin to celsius
hum = data['main']['humidity']
#vis = data['visibility']
# add an emoji if we've got one
emoji = '' if not desc in emojis else ':{}:'.format(emojis[desc])
# format the response
header = '\n*{} Weather Report *'.format(city)
sig = '\nCheers, \n\t *Weatherbot*'
response = '\n\t'.join([
header,
'Description: {} {}'.format(desc, emoji),
'Temperature: {}'.format(temp),
'Humidity: {}'.format(hum),
#'Visibility: {}'.format(vis),
sig
])
else:
response = ':sob: I couldn\'t get any weather data for "{}"'.format(message)
else:
response = ':sob: I couldn\'t find any cities matching "{}"'.format(message)
# send the response
client.api_call(
"chat.postMessage",
channel=channel,
text=response,
user=user
)
if __name__ == "__main__":
if client.rtm_connect(with_team_state=False):
print('Weatherbot ready 2 rumbl')
while True:
message, channel, user = parse_command(client.rtm_read())
if not message == None:
handle_message(message,channel,user)
time.sleep(1)
else:
print ("Connection failed")
| []
| []
| [
"WEATHERBOT_API_KEY",
"OPENWEATHER_API_KEY"
]
| [] | ["WEATHERBOT_API_KEY", "OPENWEATHER_API_KEY"] | python | 2 | 0 | |
cmd/cpustats/main.go | /*
Copyright 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"net/http"
"os"
"github.com/electrocucaracha/k8s-HorizontalPodAutoscaler-demo/pkg/router"
"github.com/rs/cors"
)
var (
version = "dev"
commit = "n/a"
date = "n/a"
)
func main() {
var port string
if port = os.Getenv("PORT"); port == "" {
port = "3000"
}
router := router.CreateRouter()
log.Println("Starting server at " + port)
log.Println("version: " + version)
log.Println("commit: " + commit)
log.Println("date: " + date)
if err := http.ListenAndServe(":"+port, cors.Default().Handler(router)); err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
CarteService/default/src/main/java/test/HelloWorld.java | package test;
import java.awt.AWTException;
import java.awt.BorderLayout;
import java.awt.Image;
import java.awt.SystemTray;
import java.awt.Toolkit;
import java.awt.TrayIcon;
import java.awt.image.ImageProducer;
import java.awt.image.MemoryImageSource;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileWriter;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Vector;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import javax.swing.JFrame;
import javax.swing.JLabel;
import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.rzo.yajsw.app.WrapperJVMMain;
/**
 * Test application used to exercise the YAJSW (Yet Another Java Service
 * Wrapper) runtime. Depending on the first command-line argument it can
 * simulate crashes, out-of-memory conditions, deadlocks, tray icons, and
 * various shutdown behaviors so wrapper features can be verified manually.
 */
public class HelloWorld
{
    // Exercises a third-party collection type on the classpath.
    Map m = new CaseInsensitiveMap();
    // Grows without bound when the "outofmem-heap" scenario runs.
    static Map outOfMem = new HashMap();
    // Single worker used by the "signalStopping" shutdown scenario.
    static Executor executor = Executors.newSingleThreadExecutor();

    /** Prints 0..9 with a 100 ms pause between numbers, then exits. */
    static class MyWriter implements Runnable
    {
        public void run()
        {
            Thread.currentThread().setName("writer");
            int i = 0;
            while (i < 10)
            {
                System.out.println(i++);
                try
                {
                    Thread.sleep(100);
                }
                catch (InterruptedException e)
                {
                    e.printStackTrace();
                }
            }
        }
    }

    /**
     * Intentionally produces a classic two-lock deadlock so wrapper-side
     * deadlock detection can be tested. Neither thread ever finishes.
     */
    public static void simulateDeadlock()
    {
        // These are the two resource objects we'll try to get locks for
        final Object resource1 = "resource1";
        final Object resource2 = "resource2";
        // Here's the first thread. It tries to lock resource1 then resource2
        Thread t1 = new Thread()
        {
            public void run()
            {
                Thread.currentThread().setName("simulate deadlock");
                // Lock resource 1
                synchronized (resource1)
                {
                    System.out.println("Thread 1: locked resource 1");
                    // Pause for a bit, simulating some file I/O or something.
                    // Basically, we just want to give the other thread a chance
                    // to
                    // run. Threads and deadlock are asynchronous things, but
                    // we're
                    // trying to force deadlock to happen here...
                    try
                    {
                        Thread.sleep(50);
                    }
                    catch (InterruptedException e)
                    {
                    }
                    // Now wait 'till we can get a lock on resource 2
                    synchronized (resource2)
                    {
                        System.out.println("Thread 1: locked resource 2");
                    }
                }
            }
        };
        // Here's the second thread. It tries to lock resource2 then resource1
        Thread t2 = new Thread()
        {
            public void run()
            {
                Thread.currentThread().setName("simulate deadlock 2");
                // This thread locks resource 2 right away
                synchronized (resource2)
                {
                    System.out.println("Thread 2: locked resource 2");
                    // Then it pauses, for the same reason as the first thread
                    // does
                    try
                    {
                        Thread.sleep(50);
                    }
                    catch (InterruptedException e)
                    {
                    }
                    // Then it tries to lock resource1. But wait! Thread 1
                    // locked
                    // resource1, and won't release it 'till it gets a lock on
                    // resource2. This thread holds the lock on resource2, and
                    // won't
                    // release it 'till it gets resource1. We're at an impasse.
                    // Neither
                    // thread can run, and the program freezes up.
                    synchronized (resource1)
                    {
                        System.out.println("Thread 2: locked resource 1");
                    }
                }
            }
        };
        // Start the two threads. If all goes as planned, deadlock will occur,
        // and the program will never exit.
        t1.start();
        t2.start();
    }

    // test for application main.
    /**
     * Entry point. Recognized values for args[0]:
     * "exception", "signalStopping", "sleepStop", "crash", "outofmem-thread",
     * "outofmem-heap", "appready", "reportStartup", "deadlock", "tray".
     * With no argument, starts writer threads and loops forever printing a
     * counter (the loop at the bottom never terminates normally).
     */
    public static void main(final String[] args) throws Exception
    {
        System.out.println("TESTENV :" + System.getenv("TESTENV"));
        final FileWriter fw = new FileWriter("test.txt");
        // Shutdown hook verifies wrapper behavior during JVM termination
        // (stop reason, env visibility, optional stop-signaling loop).
        Runtime.getRuntime().addShutdownHook(new Thread()
        {
            public void run()
            {
                Thread.currentThread().setName("shutdown hook");
                System.out.println("TESTENV :" + System.getenv("TESTENV"));
                if (WrapperJVMMain.WRAPPER_MANAGER != null)
                    System.out.println("stop reason: " + WrapperJVMMain.WRAPPER_MANAGER.getStopReason());
                if (args.length > 0 && args[0].equals("exception"))
                {
                    System.out.println("Exception 1");
                    System.out.println("Exception 2");
                    System.out.println("Exception 3");
                }
                int i = 1;
                // while (i>0)
                // System.out.println("asdfasd");
                // Runtime.getRuntime().halt(0);
                System.out.println("You wanna quit, hey?");
                try
                {
                    fw.close();
                    if (args.length > 0 && args[0].equals("signalStopping"))
                    {
                        System.out.println("+ sleeping");
                        // Keep telling the wrapper we are still stopping so it
                        // extends its shutdown timeout while we sleep below.
                        executor.execute(new Runnable()
                        {
                            public void run()
                            {
                                while (true)
                                {
                                    try
                                    {
                                        if (WrapperJVMMain.WRAPPER_MANAGER != null)
                                        {
                                            WrapperJVMMain.WRAPPER_MANAGER.signalStopping(5000);
                                            System.out.println("signaled stopping 5000");
                                        }
                                        Thread.sleep(5000);
                                    }
                                    catch (InterruptedException e)
                                    {
                                        e.printStackTrace();
                                        break;
                                    }
                                }
                            }
                        });
                        Thread.sleep(60000);
                        System.out.println("- sleeping");
                    }
                    else if (args.length > 0 && args[0].equals("sleepStop"))
                    {
                        Thread.sleep(180000);
                        Runtime.getRuntime().halt(0);
                    }
                }
                catch (Exception e)
                {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
                // while(true);
            }
        });
        System.out.println("java.library.path: " + System.getProperty("java.library.path"));
        // Hard JVM termination after 5 s (exit code 99) to test restart-on-crash.
        if (args.length >= 1 && "crash".equals(args[0]))
        {
            Thread.sleep(5000);
            Runtime.getRuntime().halt(99);
        }
        // Exhausts native threads: each thread sleeps (almost) forever.
        if (args.length >= 1 && "outofmem-thread".equals(args[0]))
        {
            int x = 0;
            while (true)
            {
                x++;
                new Thread(new Runnable()
                {
                    public void run()
                    {
                        try
                        {
                            // System.out.println("thread up");
                            Thread.sleep(Long.MAX_VALUE);
                            System.out.println("thread down");
                        }
                        catch (InterruptedException e)
                        {
                            // TODO Auto-generated catch block
                            e.printStackTrace();
                        }
                    }
                }).start();
                if (x % 100 == 0)
                    System.out.println("outofmem-thread " + x);
                // Thread.sleep(10);
            }
        }
        // Exhausts heap by filling the static outOfMem map forever.
        if (args.length >= 1 && "outofmem-heap".equals(args[0]))
        {
            new Thread(new Runnable()
            {
                public void run()
                {
                    int i = 0;
                    while (true)
                    {
                        i++;
                        outOfMem.put(i, "aaaaaaaaaaaaaaaaaaaaa" + i);
                        if (i % 1000 == 0)
                            System.out.println("outofmem-heap " + i);
                        // Thread.sleep(10);
                    }
                }
            }).start();
        }
        // Delayed "service started" report to the wrapper.
        if (args.length >= 1 && "appready".equals(args[0]))
        {
            Thread.sleep(5000);
            System.out.println("calling report service startup");
            if (WrapperJVMMain.WRAPPER_MANAGER != null)
                WrapperJVMMain.WRAPPER_MANAGER.reportServiceStartup();
            else
                System.out.println("missing wrapper manager");
        }
        System.out.println("myenv " + System.getProperty("myenv"));
        if (WrapperJVMMain.WRAPPER_MANAGER != null)
            System.out.println("wrapper property: " + WrapperJVMMain.WRAPPER_MANAGER.getProperties().getProperty("wrapper.debug"));
        /*
         * try { Process p = Runtime.getRuntime().exec("../set.bat");
         * BufferedReader in1 = new BufferedReader(new
         * InputStreamReader(p.getInputStream())); String line; while ((line =
         * in1.readLine()) != null) System.out.println(line); } catch (Exception
         * ex) { ex.printStackTrace(); } DocumentBuilderFactory factory =
         * DocumentBuilderFactory.newInstance();
         * System.out.println(factory.getClass());
         */
        // try
        // {
        // Configuration config = new BaseConfiguration();
        // }
        // catch (Throwable ex)
        // {
        // System.out.println("all ok we cannot access commons configuration");
        // ex.printStackTrace();
        // }
        System.out.println("args:");
        for (int i = 0; i < args.length; i++)
            System.out.println(args[i]);
        final Vector v = new Vector();
        new File("test.txt").delete();
        final BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        // Console reader: lets an operator trigger specific exit codes
        // ("exit 0" / "exit 1" / "exit 257") while the app runs.
        new Thread(new Runnable()
        {
            public void run()
            {
                Thread.currentThread().setName("input reader");
                try
                {
                    int i = 0;
                    byte[] buf = new byte[256];
                    while (true)
                    {
                        i++;
                        String line = in.readLine();
                        System.out.println("in > " + line);
                        if (line.contains("exit 0"))
                        {
                            System.out.println("exiting 0");
                            System.exit(0);
                        }
                        if (line.contains("exit 1"))
                        {
                            System.out.println("exiting 1");
                            System.exit(1);
                        }
                        if (line.contains("exit 257"))
                        {
                            // NOTE(review): prints "exiting 1" but exits with
                            // code 257 -- looks like a copy/paste slip; confirm.
                            System.out.println("exiting 1");
                            System.exit(257);
                        }
                    }
                }
                catch (Exception ex)
                {
                    ex.printStackTrace();
                }
                System.out.println("terminated");
            }
        }).start();
        ArrayList list = new ArrayList();
        // System.out.println(Scheduler.class.getClassLoader());
        // System.out.println(Configuration.class.getClassLoader());
        // System.out.flush();
        int i = 0;
        // org.rzo.yajsw.WrapperMain.WRAPPER_MANAGER.threadDump();
        try
        {
            // Thread.sleep(10000);
        }
        catch (Exception e2)
        {
            // TODO Auto-generated catch block
            e2.printStackTrace();
        }
        new Thread(new MyWriter()).start();
        new Thread(new MyWriter()).start();
        new Thread(new MyWriter()).start();
        // System.out.println(new BufferedReader(new
        // InputStreamReader(System.in)).readLine());
        // for (; i < 10;)
        if (args.length > 0 && "reportStartup".equals(args[0]))
            if (WrapperJVMMain.WRAPPER_MANAGER != null)
                WrapperJVMMain.WRAPPER_MANAGER.reportServiceStartup();
        if (args.length >= 1 && "deadlock".equals(args[0]))
            simulateDeadlock();
        if (args.length >= 1 && "tray".equals(args[0]))
            startTray();
        // Main heartbeat loop: prints a counter forever so the wrapper sees
        // a live, chatty child process. Never exits normally.
        while (true)
        {
            i++;
            System.out.println("a" + i);
            System.out.flush();
            // simulate jvm crash
            // while (i>3)
            // list.add("asfdasffsadfdsdfsaadfsasdasf");
            // if (i ==20)
            // org.rzo.yajsw.app.WrapperJVMMain.WRAPPER_MANAGER.restart();
            if (fw != null)
                try
                {
                    // v.add(new byte[1000]);
                    // fw.write("" + i + "\n");
                    // fw.flush();
                }
                catch (Throwable e1)
                {
                    // TODO Auto-generated catch block
                    e1.printStackTrace();
                    System.exit(0);
                }
            if (i % 2 == 0)
                try
                {
                    // WrapperJVMMain.WRAPPER_MANAGER.stop();
                    Thread.sleep(500);
                    // System.out.println("Exception");
                    // System.out.flush();
                    // Runtime.getRuntime().halt(0);
                }
                catch (Exception e)
                {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
        }
        /*
         * WrapperManager.instance.restart(); try { Thread.sleep(10000); } catch
         * (InterruptedException e) { // TODO Auto-generated catch block
         * e.printStackTrace(); }
         */
        // System.exit(0);
        // System.out.println("hello world. short test");
    }

    /** Installs a random-noise tray icon and opens the test window. */
    private static void startTray()
    {
        SystemTray tray = SystemTray.getSystemTray();
        int w = 80;
        int[] pix = new int[w * w];
        for (int i = 0; i < w * w; i++)
            pix[i] = (int) (Math.random() * 255);
        ImageProducer producer = new MemoryImageSource(w, w, pix, 0, w);
        Image image = Toolkit.getDefaultToolkit().createImage(producer);
        TrayIcon trayIcon = new TrayIcon(image);
        trayIcon.setImageAutoSize(true);
        startWindow();
        try
        {
            tray.add(trayIcon);
            System.out.println("installed tray");
        }
        catch (AWTException e)
        {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    /** Shows a minimal Swing frame so GUI behavior under the wrapper is visible. */
    private static void startWindow()
    {
        JFrame frame = new JFrame("Hellow World");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.getContentPane().add(new JLabel("hellow world test"), BorderLayout.CENTER);
        frame.pack();
        frame.setVisible(true);
    }
}
| [
"\"TESTENV\"",
"\"TESTENV\""
]
| []
| [
"TESTENV"
]
| [] | ["TESTENV"] | java | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/hpcloud/tail"
"github.com/tuplestream/hawkeye-client"
)
// authToken authenticates this sidecar with the TupleStream ingest service;
// read once from the environment at process start.
var authToken = os.Getenv("TUPLESTREAM_AUTH_TOKEN")
// main discovers all container log files present at startup, begins tailing
// each one (skipping the sidecar's own log) in its own goroutine, and then
// serves a /ping health-check endpoint on :8889 until the process exits.
func main() {
	fmt.Println("Sidecar started")

	paths, err := filepath.Glob("/var/log/containers/*.log")
	handleErr(err)

	for _, p := range paths {
		// Don't tail our own output; that would loop log data forever.
		if strings.Contains(p, "hawkeye-sidecar") {
			continue
		}
		go tailFile(p)
	}

	http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "pong")
	})
	log.Fatal(http.ListenAndServe(":8889", nil))
}
// tailFile follows filename and streams each appended line to the hawkeye
// ingest service, re-establishing the connection whenever the remote side
// stops accepting writes.
//
// The original implementation recursed into itself on every reconnect, which
// grew the call stack without bound over long uptimes and — because of the
// deferred conn.Close() — kept every superseded connection open until the
// (never-reached) end of the chain. This version loops instead, closing each
// connection before opening the next.
func tailFile(filename string) {
	log.Print("Starting to tail " + filename)

	for {
		conn, writer := hawkeye.InitiateConnection(filename, authToken)
		for writer == nil {
			log.Print("retrying connection in 5 seconds")
			time.Sleep(5 * time.Second)
			conn, writer = hawkeye.InitiateConnection(filename, authToken)
		}

		t, err := tail.TailFile(filename, tail.Config{Follow: true})
		if err != nil {
			conn.Close()
			handleErr(err)
		}

		shouldRetry := false
		for line := range t.Lines {
			written, werr := writer.WriteString(line.Text + "\n")
			if written < len(line.Text) || werr != nil {
				log.Print("Connection closed")
				shouldRetry = true
				break
			}
			writer.Flush()
		}

		conn.Close()
		if !shouldRetry {
			// The tailer itself finished; nothing more to stream.
			return
		}

		// Stop the old tailer so its watcher goroutine doesn't leak; a fresh
		// tailer is created on the next loop iteration.
		// NOTE(review): restarting the tail re-reads the file from the
		// beginning, matching the original recursive behavior — confirm
		// whether resuming from the previous offset is actually wanted.
		t.Stop()
		log.Print("Retrying connection for " + filename)
	}
}
// handleErr is a no-op for a nil error and panics (via log.Panic, which also
// logs the message) for anything else.
func handleErr(err error) {
	if err == nil {
		return
	}
	log.Panic(err)
}
| [
"\"TUPLESTREAM_AUTH_TOKEN\""
]
| []
| [
"TUPLESTREAM_AUTH_TOKEN"
]
| [] | ["TUPLESTREAM_AUTH_TOKEN"] | go | 1 | 0 | |
scripts/evaluate_habitat.py | # used for habitat challenges
# CHALLENGE_CONFIG_FILE=/root/perception_module/habitat-api/configs/tasks/pointnav_gibson_val_mini.yaml python -m scripts.evaluate_habitat /mnt/logdir/normal_encoding_multienv_map
# not sure on ^
# python -m scripts.evaluate_habitat /mnt/logdir/normal_encoding_multienv_map 1
import copy
from gym import spaces
import habitat
from habitat.core.agent import Agent
import json
import numpy as np
import os
import pprint
import scipy
import evkit
from evkit.models.architectures import AtariNet, TaskonomyFeaturesOnlyNet
from evkit.models.actor_critic_module import NaivelyRecurrentACModule
from evkit.env.habitat.habitatenv import transform_observations, get_obs_space
from evkit.env.habitat.utils import STOP_VALUE
from evkit.env.util.occupancy_map import OccupancyMap
from evkit.preprocess.transforms import rescale_centercrop_resize, rescale, grayscale_rescale, cross_modal_transform, identity_transform, rescale_centercrop_resize_collated, map_pool_collated, map_pool, taskonomy_features_transform, image_to_input_collated, taskonomy_multi_features_transform
from evkit.preprocess.baseline_transforms import blind, pixels_as_state
from evkit.preprocess import TransformFactory
from evkit.rl.algo.ppo import PPO
from evkit.rl.algo.ppo_replay import PPOReplay
from evkit.rl.storage import StackedSensorDictStorage, RolloutSensorDictReplayBuffer
from evkit.rl.policy import Policy, PolicyWithBase, BackoutPolicy, JerkAvoidanceValidator, TrainedBackoutPolicy
from evkit.utils.misc import Bunch, cfg_to_md, compute_weight_norm, is_interactive, remove_whitespace, update_dict_deepcopy
from evkit.utils.random import set_seed
from evkit.utils.logging import get_subdir
import tnt.torchnet as tnt
from tnt.torchnet.logger import FileLogger
import torch
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import runpy
import sys

# Merge conflict resolved in favor of the HEAD side, which introduced the
# project-local benchmark wrapper used by run_cfg() below.
# NOTE(review): confirm scripts.benchmark is the intended replacement for
# habitat.Benchmark (the other side of the conflict dropped this import).
import scripts.benchmark as habitat_benchmark

try:
    from habitat.utils.visualizations.utils import images_to_video
except ImportError:
    pass

from sacred import Experiment

ex = Experiment(name="Habitat Evaluation", interactive=False)

try:
    # First CLI argument is the experiment log directory; configs.habitat is
    # executed with this module's globals so it can register sacred configs.
    LOG_DIR = sys.argv[1].strip()
    runpy.run_module('configs.habitat', init_globals=globals())
    sys.argv.pop(1)
except:  # noqa: E722 -- deliberately best-effort; a missing logdir is survivable
    print('need to set a logdir if using weights only / loading from habitat train configs')
    pass

runpy.run_module('configs.habitat_eval', init_globals=globals())

os.environ["IMAGEIO_FFMPEG_EXE"] = '/usr/bin/ffmpeg'  # figure out better way to do this
def split_and_cat(imgs):
    """Unstack a batch of channel-stacked RGB frames into one tall image.

    `imgs` has shape (1, 3*k, H, W): k RGB frames stacked along the channel
    axis. Each 3-channel slice is taken from the single batch element and the
    k frames are concatenated along the height axis, giving a (3, k*H, W)
    tensor (used for visualizing stacked map observations).
    """
    num_frames = int(imgs.shape[1] / 3)
    frames = []
    for idx in range(num_frames):
        frames.append(imgs[:, 3 * idx: 3 * (idx + 1)][0])
    return torch.cat(frames, dim=1)
class HabitatAgent(Agent):
    """Habitat `Agent` wrapping a trained PPO/PPOReplay actor-critic.

    Loads the policy either from a full training checkpoint (`ckpt_path`) or
    by rebuilding the network from config and loading weights only, then
    exposes the habitat Agent interface (`reset` / `act`) plus benchmark
    logging helpers. Requires CUDA: the policy and observation storage are
    moved to GPU in __init__.
    """

    def __init__(self, ckpt_path, config_data):
        # Load agent
        self.action_space = spaces.Discrete(3)
        if ckpt_path is not None:
            # Full checkpoint: agent object (network + optimizer) is pickled inside.
            checkpoint_obj = torch.load(ckpt_path)
            start_epoch = checkpoint_obj["epoch"]
            print("Loaded learner (epoch {}) from {}".format(start_epoch, ckpt_path), flush=True)
            agent = checkpoint_obj["agent"]
        else:
            # Weights-only path: rebuild the perception network and PPO agent
            # from config, then load just the state_dict.
            # NOTE(review): eval() on a config string -- assumes trusted configs.
            cfg = config_data['cfg']
            perception_model = eval(cfg['learner']['perception_network'])(
                cfg['learner']['num_stack'],
                **cfg['learner']['perception_network_kwargs'])
            base = NaivelyRecurrentACModule(
                perception_unit=perception_model,
                use_gru=cfg['learner']['recurrent_policy'],
                internal_state_size=cfg['learner']['internal_state_size'])
            actor_critic = PolicyWithBase(
                base, self.action_space,
                num_stack=cfg['learner']['num_stack'],
                takeover=None)
            if cfg['learner']['use_replay']:
                agent = PPOReplay(actor_critic,
                                  cfg['learner']['clip_param'],
                                  cfg['learner']['ppo_epoch'],
                                  cfg['learner']['num_mini_batch'],
                                  cfg['learner']['value_loss_coef'],
                                  cfg['learner']['entropy_coef'],
                                  cfg['learner']['on_policy_epoch'],
                                  cfg['learner']['off_policy_epoch'],
                                  lr=cfg['learner']['lr'],
                                  eps=cfg['learner']['eps'],
                                  max_grad_norm=cfg['learner']['max_grad_norm'])
            else:
                agent = PPO(actor_critic,
                            cfg['learner']['clip_param'],
                            cfg['learner']['ppo_epoch'],
                            cfg['learner']['num_mini_batch'],
                            cfg['learner']['value_loss_coef'],
                            cfg['learner']['entropy_coef'],
                            lr=cfg['learner']['lr'],
                            eps=cfg['learner']['eps'],
                            max_grad_norm=cfg['learner']['max_grad_norm'])
            weights_path = cfg['eval_kwargs']['weights_only_path']
            ckpt = torch.load(weights_path)
            agent.actor_critic.load_state_dict(ckpt['state_dict'])
            agent.optimizer = ckpt['optimizer']
        self.actor_critic = agent.actor_critic

        # Optional "backout" takeover policy that rescues a stuck agent.
        self.takeover_policy = None
        if config_data['cfg']['learner']['backout']['use_backout']:
            backout_type = config_data['cfg']['learner']['backout']['backout_type']
            if backout_type == 'hardcoded':
                self.takeover_policy = BackoutPolicy(
                    patience=config_data['cfg']['learner']['backout']['patience'],
                    num_processes=1,
                    unstuck_dist=config_data['cfg']['learner']['backout']['unstuck_dist'],
                    randomize_actions=config_data['cfg']['learner']['backout']['randomize_actions'],
                )
            elif backout_type == 'trained':
                backout_ckpt = config_data['cfg']['learner']['backout']['backout_ckpt_path']
                assert backout_ckpt is not None, 'need a checkpoint to use a trained backout'
                backout_checkpoint_obj = torch.load(backout_ckpt)
                backout_start_epoch = backout_checkpoint_obj["epoch"]
                print("Loaded takeover policy at (epoch {}) from {}".format(backout_start_epoch, backout_ckpt), flush=True)
                # NOTE(review): this reads from checkpoint_obj (the main agent
                # checkpoint), not backout_checkpoint_obj loaded just above --
                # looks like a bug; confirm which checkpoint is intended.
                backout_policy = checkpoint_obj["agent"].actor_critic
                self.takeover_policy = TrainedBackoutPolicy(
                    patience=config_data['cfg']['learner']['backout']['patience'],
                    num_processes=1,
                    policy=backout_policy,
                    unstuck_dist=config_data['cfg']['learner']['backout']['unstuck_dist'],
                    num_takeover_steps=config_data['cfg']['learner']['backout']['num_takeover_steps'],
                )
            else:
                assert False, f'do not recognize backout type {backout_type}'
        self.actor_critic.takeover = self.takeover_policy

        # Optional action validator (e.g. avoids jerky action sequences).
        self.validator = None
        if config_data['cfg']['learner']['validator']['use_validator']:
            validator_type = config_data['cfg']['learner']['validator']['validator_type']
            if validator_type == 'jerk':
                self.validator = JerkAvoidanceValidator()
            else:
                assert False, f'do not recognize validator {validator_type}'
        self.actor_critic.action_validator = self.validator

        # Set up spaces
        self.target_dim = config_data['cfg']['env']['env_specific_kwargs']['target_dim']
        map_dim = None
        self.omap = None
        if config_data['cfg']['env']['use_map']:
            self.map_kwargs = config_data['cfg']['env']['habitat_map_kwargs']
            map_dim = 84
            assert self.map_kwargs['map_building_size'] > 0, 'If we are using map in habitat, please set building size to be positive!'
        obs_space = get_obs_space(image_dim=256, target_dim=self.target_dim, map_dim=map_dim)
        # Observation transforms are declared as strings in the config and
        # instantiated here; each returns (transform, updated_obs_space).
        preprocessing_fn_pre_agg = eval(config_data['cfg']['env']['transform_fn_pre_aggregation'])
        self.transform_pre_agg, obs_space = preprocessing_fn_pre_agg(obs_space)
        preprocessing_fn_post_agg = eval(config_data['cfg']['env']['transform_fn_post_aggregation'])
        self.transform_post_agg, obs_space = preprocessing_fn_post_agg(obs_space)
        # Rolling frame-stack of the most recent observations per sensor.
        self.current_obs = StackedSensorDictStorage(1,
                                                    config_data['cfg']['learner']['num_stack'],
                                                    {k: v.shape for k, v in obs_space.spaces.items()
                                                     if k in config_data['cfg']['env']['sensors']})
        print(f'Stacked obs shape {self.current_obs.obs_shape}')
        self.current_obs = self.current_obs.cuda()
        self.actor_critic.cuda()

        # Recurrent-policy state; (re)initialized in reset().
        self.hidden_size = config_data['cfg']['learner']['internal_state_size']
        self.test_recurrent_hidden_states = None
        self.not_done_masks = None

        # Per-episode bookkeeping for logging/diagnostics.
        self.episode_rgbs = []
        self.episode_pgs = []
        self.episode_entropy = []
        self.episode_num = 0
        self.t = 0
        self.episode_lengths = []
        self.episode_values = []
        self.last_action = None

        # Set up logging
        if config_data['cfg']['saving']['logging_type'] == 'visdom':
            self.mlog = tnt.logger.VisdomMeterLogger(
                title=config_data['uuid'], env=config_data['uuid'], server=config_data['cfg']['saving']['visdom_server'],
                port=config_data['cfg']['saving']['visdom_port'],
                log_to_filename=config_data['cfg']['saving']['visdom_log_file']
            )
            self.use_visdom = True
        elif config_data['cfg']['saving']['logging_type'] == 'tensorboard':
            self.mlog = tnt.logger.TensorboardMeterLogger(
                env=config_data['uuid'],
                log_dir=config_data['cfg']['saving']['log_dir'],
                plotstylecombined=True
            )
            self.use_visdom = False
        else:
            assert False, 'no proper logger!'

        self.log_dir = config_data['cfg']['saving']['log_dir']
        self.save_eval_videos = config_data['cfg']['saving']['save_eval_videos']
        self.mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
        self.mlog.update_meter(cfg_to_md(config_data['cfg'], config_data['uuid']), meters={'config'}, phase='val')

    def reset(self):
        """Clear recurrent state and per-episode buffers; flush episode logs.

        Called by habitat at the start of each episode.
        """
        # reset hidden state and set done
        self.test_recurrent_hidden_states = torch.zeros(
            1, self.hidden_size
        ).cuda()
        self.not_done_masks = torch.zeros(1, 1).cuda()

        # reset observation storage (and verify)
        z = torch.zeros(1, 2).cuda()
        mask_out_done = {name: z for name in self.current_obs.sensor_names}
        if 'global_pos' in self.current_obs.sensor_names:
            mask_out_done['global_pos'] = torch.zeros(1, 1).cuda()
        self.current_obs.clear_done(mask_out_done)
        for value in self.current_obs.peek().values():
            assert torch.sum(value.peek()).item() < 1e-6, 'did not clear the curent_obs properly'

        # log everything (only when a previous episode actually ran)
        if len(self.episode_pgs) != 0:
            # log video (and save to log_dir)
            if self.save_eval_videos:
                images_to_video(images=self.episode_rgbs, output_dir=self.log_dir, video_name=f'test_{self.episode_num}')
                self.mlog.add_meter(f'diagnostics/rollout_{self.episode_num}', tnt.meter.SingletonMeter(), ptype='video')
                if self.use_visdom:
                    vid_path = os.path.join(self.log_dir, f'test_{self.episode_num}.mp4')
                    self.mlog.update_meter(vid_path, meters={f'diagnostics/rollout_{self.episode_num}'}, phase='val')
                else:
                    print('video support for TB is weak not recommended')
                    rgb_tensor = torch.Tensor(self.episode_rgbs).unsqueeze(dim=0)
                    self.mlog.update_meter(rgb_tensor, meters={f'diagnostics/rollout_{self.episode_num}'}, phase='val')

            # reset log
            self.mlog.reset_meter(self.episode_num, mode='val')

            # reset episode logs
            self.episode_rgbs = []
            self.episode_pgs = []
            self.episode_values = []
            self.episode_entropy = []
            self.episode_lengths.append(self.t)
            self.episode_num += 1
            self.t = 0
            self.last_action = None

    def act(self, observations):
        """Return the next discrete action for the given habitat observations.

        Maintains the occupancy map, applies the configured observation
        transforms, and queries the recurrent actor-critic. Issues STOP
        directly when the point-goal distance drops to <= 0.2.
        """
        # tick
        self.t += 1

        # collect raw observations
        self.episode_rgbs.append(copy.deepcopy(observations['rgb']))
        self.episode_pgs.append(copy.deepcopy(observations['pointgoal']))

        # initialize or step occupancy map
        if self.map_kwargs['map_building_size'] > 0:
            if self.t == 1:
                self.omap = OccupancyMap(initial_pg=observations['pointgoal'], map_kwargs=self.map_kwargs)
            else:
                assert self.last_action is not None, 'This is not the first timestep, there must have been at least one action'
                self.omap.add_pointgoal(observations['pointgoal'])
                self.omap.step(self.last_action)

        # hard-coded STOP
        dist = observations['pointgoal'][0]
        if dist <= 0.2:
            return STOP_VALUE

        # preprocess and get observation
        observations = transform_observations(observations, target_dim=self.target_dim, omap=self.omap)
        observations = self.transform_pre_agg(observations)
        for k, v in observations.items():
            # add a leading batch dimension of 1
            observations[k] = np.expand_dims(v, axis=0)
        observations = self.transform_post_agg(observations)
        self.current_obs.insert(observations)
        self.obs_stacked = {k: v.peek().cuda() for k, v in self.current_obs.peek().items()}

        # log first couple agent observation
        if self.t % 4 == 0 and 50 < self.t < 60:
            map_output = split_and_cat(self.obs_stacked['map']) * 0.5 + 0.5
            self.mlog.add_meter(f'diagnostics/map_{self.t}', tnt.meter.SingletonMeter(), ptype='image')
            self.mlog.update_meter(map_output, meters={f'diagnostics/map_{self.t}'}, phase='val')

        # act
        with torch.no_grad():
            value, action, act_log_prob, self.test_recurrent_hidden_states = self.actor_critic.act(
                self.obs_stacked,
                self.test_recurrent_hidden_states,
                self.not_done_masks,
            )
        action = action.item()
        self.not_done_masks = torch.ones(1, 1).cuda()  # mask says not done

        # log agent outputs
        assert self.action_space.contains(action), 'action from model does not fit our action space'
        self.last_action = action
        return action

    def finish_benchmark(self, metrics):
        """Log final benchmark metrics and the episode-length histogram."""
        self.mlog.add_meter('diagnostics/length_hist', tnt.meter.ValueSummaryMeter(), ptype='histogram')
        self.mlog.update_meter(self.episode_lengths, meters={'diagnostics/length_hist'}, phase='val')
        for k, v in metrics.items():
            print(k, v)
            self.mlog.add_meter(f'metrics/{k}', tnt.meter.ValueSummaryMeter())
            self.mlog.update_meter(v, meters={f'metrics/{k}'}, phase='val')
        self.mlog.reset_meter(self.episode_num + 1, mode='val')
@ex.main
def run_cfg(cfg, uuid):
    """Evaluate a trained agent, either against the habitat challenge server
    or a local benchmark.

    If `cfg['eval_kwargs']['exp_path']` is set, the experiment's saved config
    and latest checkpoint are loaded from that directory; otherwise the agent
    is built from `cfg` alone (weights-only mode) and results go to LOG_DIR.
    Benchmark metrics are logged through the agent and dumped to a JSON file
    in the experiment directory.
    """
    if cfg['eval_kwargs']['exp_path'] is not None:
        # Process exp path
        exp_paths = [cfg['eval_kwargs']['exp_path']]
        # Set up config with the first exp only
        metadata_dir = get_subdir(exp_paths[0], 'metadata')
        config_path = os.path.join(metadata_dir, 'config.json')
        # Load config
        with open(config_path) as config:
            config_data = json.load(config)
        # Update configs for benchmark mode
        config_data['uuid'] += '_benchmark' + uuid
        config_data['cfg']['saving']['log_dir'] += '/benchmark'
        config_data['cfg']['saving']['visdom_log_file'] = os.path.join(config_data['cfg']['saving']['log_dir'], 'visdom_logs.json')
        config_data['cfg']['learner']['test'] = True
        if cfg['eval_kwargs']['overwrite_configs']:
            config_data['cfg'] = update_dict_deepcopy(config_data['cfg'], cfg)
        set_seed(config_data['cfg']['training']['seed'])
        # Get checkpoints
        ckpt_paths = []
        for exp_path in exp_paths:
            ckpts_dir = get_subdir(exp_path, 'checkpoints')
            ckpt_path = os.path.join(ckpts_dir, 'ckpt-latest.dat')
            ckpt_paths.append(ckpt_path)
    else:
        # Weights-only mode: no saved experiment config to load.
        config_data = {'cfg': cfg, 'uuid': uuid}
        ckpt_paths = [None]
        exp_paths = [LOG_DIR]

    # Debug runs force visdom logging and per-episode video dumps.
    if 'eval_kwargs' in cfg and 'debug' in cfg['eval_kwargs']:
        if cfg['eval_kwargs']['debug']:
            config_data['cfg']['saving']['logging_type'] = 'visdom'
            config_data['cfg']['saving']['save_eval_videos'] = True
        else:
            config_data['cfg']['saving']['save_eval_videos'] = False

    print(pprint.pformat(config_data))
    print('Loaded:', config_data['uuid'])
    agent = HabitatAgent(ckpt_path=ckpt_paths[0], config_data=config_data)
    if cfg['eval_kwargs']['challenge']:
        challenge = habitat.Challenge()
        challenge.submit(agent)
    else:
        # Merge conflict resolved in favor of the HEAD side: use the
        # project-local benchmark wrapper (scripts.benchmark, imported as
        # habitat_benchmark at module level) instead of habitat.Benchmark.
        # NOTE(review): confirm this is the intended resolution.
        benchmark = habitat_benchmark.Benchmark(config_file=cfg['eval_kwargs']['benchmark_config'], config_dir='/')
        metrics = benchmark.evaluate(agent, cfg['eval_kwargs']['benchmark_episodes'])
        agent.finish_benchmark(metrics)
        benchmark._env.close()

        # Persist metrics + full config next to the experiment, keyed by the
        # backout parameters used for this run.
        everything = update_dict_deepcopy(metrics, config_data)
        patience, unstuck_dist = config_data['cfg']['learner']['backout']['patience'], config_data['cfg']['learner']['backout']['unstuck_dist']
        write_location = os.path.join(exp_paths[0], f'benchmark_data_p{patience}_d{unstuck_dist}.json')
        with open(write_location, 'w') as outfile:
            json.dump(everything, outfile)


if __name__ == "__main__":
    ex.run_commandline()
| []
| []
| [
"IMAGEIO_FFMPEG_EXE"
]
| [] | ["IMAGEIO_FFMPEG_EXE"] | python | 1 | 0 | |
bin/cqlsh.py | #!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement
import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser
from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID
# cqlsh requires CPython 2.7.x exactly; bail out early on anything else.
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
    sys.exit("\nCQL Shell supports only Python 2.7\n")

# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
    sys.exit("\nCQL Shell does not run on Jython\n")

UTF8 = 'utf-8'
CP65001 = 'cp65001'  # Win utf-8 variant

description = "CQL Shell for Apache Cassandra"
version = "5.0.1"

readline = None
try:
    # check if tty first, cause readline doesn't check, and only cares
    # about $TERM. we don't want the funky escape code stuff to be
    # output if not a tty.
    if sys.stdin.isatty():
        import readline
except ImportError:
    pass

CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'

CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'

# Pick the CQL reference docs location: source tree, then package install,
# then the online fallback.
# default location of local CQL.html
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
    # default location of local CQL.html
    CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
    # fallback to package file
    CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
    # fallback to online version
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK

# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
    # no browser detected at all -- point help at the online docs
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
    # only on Linux (some OS with xdg-open): demote xdg-open to last choice
    # since there is no desktop session for it to use.
    webbrowser._tryorder.remove('xdg-open')
    webbrowser._tryorder.append('xdg-open')

# use bundled libs for python-cql and thrift, if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]

myplatform = platform.system()
is_win = myplatform == 'Windows'

# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
    codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)

if myplatform == 'Linux':
    ZIPLIB_DIRS.append('/usr/share/cassandra/lib')

# Setting CQLSH_NO_BUNDLED in the environment disables the bundled zips
# entirely (useful when a system-wide driver should be used instead).
if os.environ.get('CQLSH_NO_BUNDLED', ''):
    ZIPLIB_DIRS = ()
def find_zip(libprefix):
    """Locate a bundled zip whose name starts with `libprefix`.

    Scans ZIPLIB_DIRS in order and returns the lexicographically greatest
    match from the first directory containing any; returns None if no
    directory has a match.
    """
    for ziplibdir in ZIPLIB_DIRS:
        matches = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
        if not matches:
            continue
        # lexicographic max -- probably the highest version, if multiple
        return max(matches)
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')
for lib in third_parties:
lib_zip = find_zip(lib)
if lib_zip:
sys.path.insert(0, lib_zip)
warnings.filterwarnings("ignore", r".*blist.*")
try:
import cassandra
except ImportError, e:
sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
'You might try "pip install cassandra-driver".\n\n'
'Python: %s\n'
'Module load path: %r\n\n'
'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
    sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
                                 RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
                                 DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
                                 format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
# Connection and display defaults; these can be overridden via cqlshrc,
# environment variables or command-line options.
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_PROTOCOL_VERSION = 4
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10
DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10

# libedit-based readline (e.g. macOS) uses a different binding syntax than
# GNU readline, so the completion key must be spelled differently.
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
    DEFAULT_COMPLETEKEY = '\t'
else:
    DEFAULT_COMPLETEKEY = 'tab'

# Populated elsewhere once the CQL version in use is known.
cqldocs = None
cqlruleset = None
# Command-line interface. The epilog documents the host/port defaults
# resolved above and their environment-variable overrides.
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()

parser = optparse.OptionParser(description=description, epilog=epilog,
                               usage="Usage: %prog [options] [host [port]]",
                               version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
                  help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
                  help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
                  help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
                  " (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
                  help='Specify a particular CQL version, '
                       'by default the highest version supported by the server will be used.'
                       ' Examples: "3.0.3", "3.1.0"')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
                  help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
                  help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
                  help='Force tty mode (command prompt).')

# Parse into a bare Values() so only options actually given on the command
# line appear as attributes (presence is later tested with hasattr()).
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))

# Resolve the cqlshrc path: an explicit --cqlshrc wins (with a warning if
# that file is missing), otherwise fall back to ~/.cassandra/cqlshrc.
if hasattr(options, 'cqlshrc'):
    CONFIG_FILE = options.cqlshrc
    if not os.path.exists(CONFIG_FILE):
        print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
        CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
    CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')

HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
    try:
        os.mkdir(HISTORY_DIR)
    except OSError:
        # Non-fatal: the shell still works, it just cannot persist history.
        print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR

# Migrate config/history from the legacy home-directory dotfile locations,
# unless a file already exists at the new location.
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
    if os.path.exists(CONFIG_FILE):
        print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
            % (OLD_CONFIG_FILE, CONFIG_FILE)
    else:
        os.rename(OLD_CONFIG_FILE, CONFIG_FILE)
OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
    os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
# Driver exception types that represent "normal" CQL-level failures; these
# are reported to the user as messages rather than as tracebacks.
CQL_ERRORS = (
    cassandra.AlreadyExists, cassandra.AuthenticationFailed, cassandra.CoordinationFailure,
    cassandra.InvalidRequest, cassandra.Timeout, cassandra.Unauthorized, cassandra.OperationTimedOut,
    cassandra.cluster.NoHostAvailable,
    cassandra.connection.ConnectionBusy, cassandra.connection.ProtocolError, cassandra.connection.ConnectionException,
    cassandra.protocol.ErrorMessage, cassandra.protocol.InternalError, cassandra.query.TraceUnavailable
)

# Verbose tab-completion debugging, enabled with CQLSH_DEBUG_COMPLETION=YES.
debug_completion = bool(os.environ.get('CQLSH_DEBUG_COMPLETION', '') == 'YES')
# Shell-level error types: each marks a specific "object not found / action
# not possible" condition raised by the metadata lookups below.


class NoKeyspaceError(Exception):
    pass


class KeyspaceNotFound(Exception):
    pass


class ColumnFamilyNotFound(Exception):
    pass


class IndexNotFound(Exception):
    pass


class MaterializedViewNotFound(Exception):
    pass


class ObjectNotFound(Exception):
    pass


class VersionNotSupported(Exception):
    pass


class UserTypeNotFound(Exception):
    pass


class FunctionNotFound(Exception):
    pass


class AggregateNotFound(Exception):
    pass
class DecodeError(Exception):
    """Wraps a raw value that could not be deserialized, together with the
    underlying error and (optionally) the column it came from. The class
    attribute `verb` is used in the rendered message (see FormatError)."""
    verb = 'decode'

    def __init__(self, thebytes, err, colname=None):
        self.thebytes = thebytes
        self.err = err
        self.colname = colname

    def __str__(self):
        return str(self.thebytes)

    def message(self):
        # Mention the column when we know it.
        if self.colname is None:
            subject = 'value %r' % (self.thebytes,)
        else:
            subject = 'value %r (for column %r)' % (self.thebytes, self.colname)
        return 'Failed to %s %s : %s' % (self.verb, subject, self.err)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.message())
# Same payload/reporting as DecodeError, but raised when a successfully
# decoded value cannot be rendered for display.
class FormatError(DecodeError):
    verb = 'format'
def full_cql_version(ver):
    """Normalize a CQL version string and build a comparable tuple from it.

    Pads missing components with ".0" (so "3.4" becomes "3.4.0") and splits
    off any "-suffix" qualifier.

    Returns (normalized_version_string, version_tuple) where the tuple is
    (major, minor, patch, suffix) with numeric components as ints and the
    suffix as a string ('' when absent), suitable for ordered comparison.
    """
    while ver.count('.') < 2:
        ver += '.0'
    # Split off e.g. "-beta1"; the trailing [''] guarantees ver_parts[1] exists.
    ver_parts = ver.split('-', 1) + ['']
    # List comprehension rather than map(): on Python 3, map() returns an
    # iterator which cannot be concatenated to a list with '+'.
    vertuple = tuple([int(part) for part in ver_parts[0].split('.')] + [ver_parts[1]])
    return ver, vertuple
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
                 float_precision=None, colormap=None, nullval=None):
    """Render one value for display.

    Values that failed to decode are shown as the repr of their raw bytes
    (colored as an error when color is enabled); all other values are
    delegated to the type-aware formatter.
    """
    if not isinstance(val, DecodeError):
        return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
                              addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
                              float_precision=float_precision)
    raw = repr(val.thebytes)
    if addcolor:
        return colorme(raw, colormap, 'error')
    return FormattedValue(raw)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that suppresses the source-line echo
    (passes line='') so warnings do not quote cqlsh's own code at users."""
    if file is None:
        file = sys.stderr
    try:
        file.write(warnings.formatwarning(message, category, filename, lineno, line=''))
    except IOError:
        # Best effort only -- never let a failed warning write break the shell.
        pass


warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
    """Patch the bundled python driver with cqlsh-specific behavior:
    overflow-tolerant timestamp decoding, empty-value support, and a
    FrozenType shim."""

    class DateOverFlowWarning(RuntimeWarning):
        pass

    # Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
    def deserialize_date_fallback_int(byts, protocol_version):
        timestamp_ms = int64_unpack(byts)
        try:
            return datetime_from_timestamp(timestamp_ms / 1000.0)
        except OverflowError:
            warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
                                              "Timestamps are displayed in milliseconds from epoch."))
            # Out-of-range timestamps stay as raw millisecond ints.
            return timestamp_ms

    cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)

    # Remove the driver's fast deserializer so our fallback above is used.
    if hasattr(cassandra, 'deserializers'):
        del cassandra.deserializers.DesDateType

    # Return cassandra.cqltypes.EMPTY instead of None for empty values
    cassandra.cqltypes.CassandraType.support_empty_values = True

    class FrozenType(cassandra.cqltypes._ParameterizedType):
        """
        Needed until the bundled python driver adds FrozenType.
        """
        typename = "frozen"
        num_subtypes = 1

        @classmethod
        def deserialize_safe(cls, byts, protocol_version):
            subtype, = cls.subtypes
            return subtype.from_binary(byts)

        @classmethod
        def serialize_safe(cls, val, protocol_version):
            subtype, = cls.subtypes
            return subtype.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
    """Interactive command interpreter for CQL, built on the stdlib `cmd`
    module.

    The class attributes below hold prompt templates and per-session
    defaults; most are overridden per instance in __init__.
    """
    custom_prompt = os.getenv('CQLSH_PROMPT', '')
    # BUG FIX: this used "is not ''" -- an identity comparison with a string
    # literal that only works by accident of CPython string interning (and is
    # a SyntaxWarning on modern Pythons). Equality is what is meant.
    if custom_prompt != '':
        custom_prompt += "\n"
    default_prompt = custom_prompt + "cqlsh> "
    continue_prompt = "   ... "
    keyspace_prompt = custom_prompt + "cqlsh:%s> "
    keyspace_continue_prompt = "%s ... "
    show_line_nums = False            # prefix errors with line numbers (non-tty input)
    debug = False
    stop = False                      # set to True to leave the command loop
    last_hist = None                  # last entry written to readline history
    shunted_query_out = None
    use_paging = True
    default_page_size = 100
def __init__(self, hostname, port, color=False,
             username=None, password=None, encoding=None, stdin=None, tty=True,
             completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
             cqlver=None, keyspace=None,
             tracing_enabled=False, expand_enabled=False,
             display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
             display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
             display_date_format=DEFAULT_DATE_FORMAT,
             display_float_precision=DEFAULT_FLOAT_PRECISION,
             display_double_precision=DEFAULT_DOUBLE_PRECISION,
             display_timezone=None,
             max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
             ssl=False,
             single_statement=None,
             request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
             protocol_version=DEFAULT_PROTOCOL_VERSION,
             connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS):
    """Connect to the cluster (or adopt `use_conn`) and initialize all
    per-session state: authentication, display formats, paging, I/O
    streams and prompts. `tty` selects interactive behavior;
    `single_statement` makes the main loop run one statement and exit."""
    cmd.Cmd.__init__(self, completekey=completekey)
    self.hostname = hostname
    self.port = port
    self.auth_provider = None
    if username:
        if not password:
            # Prompt interactively rather than requiring -p on the command line.
            password = getpass.getpass()
        self.auth_provider = PlainTextAuthProvider(username=username, password=password)
    self.username = username
    self.keyspace = keyspace
    self.ssl = ssl
    self.tracing_enabled = tracing_enabled
    self.page_size = self.default_page_size
    self.expand_enabled = expand_enabled
    if use_conn:
        self.conn = use_conn
    else:
        # The whitelist load-balancing policy pins this shell to the one
        # requested host.
        self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
                            protocol_version=protocol_version,
                            auth_provider=self.auth_provider,
                            ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
                            load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                            control_connection_timeout=connect_timeout,
                            connect_timeout=connect_timeout)
    # Only shut the cluster object down later if this shell created it.
    self.owns_connection = not use_conn

    if keyspace:
        self.session = self.conn.connect(keyspace)
    else:
        self.session = self.conn.connect()

    if browser == "":
        browser = None
    self.browser = browser
    self.color = color

    self.display_nanotime_format = display_nanotime_format
    self.display_timestamp_format = display_timestamp_format
    self.display_date_format = display_date_format

    self.display_float_precision = display_float_precision
    self.display_double_precision = display_double_precision

    self.display_timezone = display_timezone

    self.session.default_timeout = request_timeout
    # Rows come back as ordered dicts so column order survives to display.
    self.session.row_factory = ordered_dict_factory
    self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
    self.get_connection_versions()
    self.set_expanded_cql_version(self.connection_versions['cql'])

    self.current_keyspace = keyspace

    self.max_trace_wait = max_trace_wait
    self.session.max_trace_wait = max_trace_wait

    self.tty = tty
    self.encoding = encoding
    self.check_windows_encoding()

    self.output_codec = codecs.lookup(encoding)

    # Buffer accumulating a (possibly multi-line) statement in progress.
    self.statement = StringIO()
    self.lineno = 1
    self.in_comment = False

    self.prompt = ''
    if stdin is None:
        stdin = sys.stdin

    if tty:
        self.reset_prompt()
        self.report_connection()
        print 'Use HELP for help.'
    else:
        # Non-interactive input: error reports carry line numbers instead.
        self.show_line_nums = True
    self.stdin = stdin
    self.query_out = sys.stdout
    self.consistency_level = cassandra.ConsistencyLevel.ONE
    self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL

    self.empty_lines = 0
    self.statement_error = False
    self.single_statement = single_statement
@property
def is_using_utf8(self):
    # utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
    return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]

def check_windows_encoding(self):
    """Warn interactive Windows users whose console codepage cannot display
    the configured UTF-8 output encoding."""
    if is_win and os.name == 'nt' and self.tty and \
            self.is_using_utf8 and sys.stdout.encoding != CP65001:
        self.printerr("\nWARNING: console codepage must be set to cp65001 "
                      "to support {} encoding on Windows platforms.\n"
                      "If you experience encoding problems, change your console"
                      " codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
    """Record the server's CQL version both as a normalized string and as a
    tuple usable for ordered comparisons (see cqlver_atleast)."""
    ver, vertuple = full_cql_version(ver)
    self.cql_version = ver
    self.cql_ver_tuple = vertuple

def cqlver_atleast(self, major, minor=0, patch=0):
    # Compare only the numeric (major, minor, patch) part of the version tuple.
    return self.cql_ver_tuple[:3] >= (major, minor, patch)
def myformat_value(self, val, cqltype=None, **kwargs):
    """Format one value using this shell's display settings, collecting
    decode/format failures in self.decoding_errors instead of raising."""
    if isinstance(val, DecodeError):
        self.decoding_errors.append(val)
    try:
        dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
                                   date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
                                   timezone=self.display_timezone)
        # Doubles have their own precision setting; other numerics use the
        # float precision.
        precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' \
            else self.display_float_precision
        return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
                            addcolor=self.color, date_time_format=dtformats,
                            float_precision=precision, **kwargs)
    except Exception, e:
        # Remember the failure and fall back to a plain rendering of it.
        err = FormatError(val, e)
        self.decoding_errors.append(err)
        return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)

def myformat_colname(self, name, table_meta=None):
    """Format a column header, color-coding partition key (red), clustering
    key (cyan) and static (white) columns."""
    column_colors = COLUMN_NAME_COLORS.copy()
    # check column role and color appropriately
    if table_meta:
        if name in [col.name for col in table_meta.partition_key]:
            column_colors.default_factory = lambda: RED
        elif name in [col.name for col in table_meta.clustering_key]:
            column_colors.default_factory = lambda: CYAN
        elif name in table_meta.columns and table_meta.columns[name].is_static:
            column_colors.default_factory = lambda: WHITE
    return self.myformat_value(name, colormap=column_colors)
def report_connection(self):
    # Startup banner: where we connected and which versions are in play.
    self.show_host()
    self.show_version()

def show_host(self):
    print "Connected to %s at %s:%d." % \
        (self.applycolor(self.get_cluster_name(), BLUE),
         self.hostname,
         self.port)

def show_version(self):
    vers = self.connection_versions.copy()
    vers['shver'] = version
    # system.Versions['cql'] apparently does not reflect changes with
    # set_cql_version.
    vers['cql'] = self.cql_version
    print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers

def show_session(self, sessionid, partial_session=False):
    # Render a stored query trace by its session id.
    print_trace_session(self, self.session, sessionid, partial_session)

def get_connection_versions(self):
    """Cache the server's build / protocol / CQL versions from system.local."""
    result, = self.session.execute("select * from system.local where key = 'local'")
    vers = {
        'build': result['release_version'],
        'protocol': result['native_protocol_version'],
        'cql': result['cql_version'],
    }
    self.connection_versions = vers
# --- Schema metadata accessors. Name lookups default to the current
# --- keyspace when ksname is None; all read the driver's cached metadata.

def get_keyspace_names(self):
    return map(str, self.conn.metadata.keyspaces.keys())

def get_columnfamily_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return map(str, self.get_keyspace_meta(ksname).tables.keys())

def get_materialized_view_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return map(str, self.get_keyspace_meta(ksname).views.keys())

def get_index_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return map(str, self.get_keyspace_meta(ksname).indexes.keys())

def get_column_names(self, ksname, cfname):
    if ksname is None:
        ksname = self.current_keyspace
    layout = self.get_table_meta(ksname, cfname)
    return [unicode(col) for col in layout.columns]

def get_usertype_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return self.get_keyspace_meta(ksname).user_types.keys()

def get_usertype_layout(self, ksname, typename):
    """Return (field_name, field_type) pairs for a user-defined type;
    raises UserTypeNotFound for unknown types."""
    if ksname is None:
        ksname = self.current_keyspace
    ks_meta = self.get_keyspace_meta(ksname)
    try:
        user_type = ks_meta.user_types[typename]
    except KeyError:
        raise UserTypeNotFound("User type %r not found" % typename)
    return zip(user_type.field_names, user_type.field_types)

def get_userfunction_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())

def get_useraggregate_names(self, ksname=None):
    if ksname is None:
        ksname = self.current_keyspace
    return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())

def get_cluster_name(self):
    return self.conn.metadata.cluster_name

def get_partitioner(self):
    return self.conn.metadata.partitioner

def get_keyspace_meta(self, ksname):
    if ksname not in self.conn.metadata.keyspaces:
        raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
    return self.conn.metadata.keyspaces[ksname]

def get_keyspaces(self):
    return self.conn.metadata.keyspaces.values()

def get_ring(self, ks):
    # The token map is built lazily; force a build for this keyspace first.
    self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
    return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
    """Look up driver metadata for a table, defaulting to the current
    keyspace when ksname is None.

    For the system_auth 'roles'/'role_permissions' tables -- which may not
    exist in the schema when an external auth implementation is in use --
    falls back to synthesized metadata. Raises ColumnFamilyNotFound for
    anything else that is missing.
    """
    if ksname is None:
        ksname = self.current_keyspace
    ksmeta = self.get_keyspace_meta(ksname)

    if tablename not in ksmeta.tables:
        if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
            # BUG FIX: the synthesized metadata was previously discarded,
            # making this method silently return None for these tables.
            return self.get_fake_auth_table_meta(ksname, tablename)
        else:
            raise ColumnFamilyNotFound("Column family %r not found" % tablename)
    else:
        return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
    """Synthesize driver metadata for the system_auth 'roles' /
    'role_permissions' tables and return it.

    Raises ColumnFamilyNotFound for any other table name.
    """
    # may be using external auth implementation so internal tables
    # aren't actually defined in schema. In this case, we'll fake
    # them up
    if tablename == 'roles':
        ks_meta = KeyspaceMetadata(ksname, True, None, None)
        table_meta = TableMetadata(ks_meta, 'roles')
        table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
        table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
        table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
    elif tablename == 'role_permissions':
        ks_meta = KeyspaceMetadata(ksname, True, None, None)
        table_meta = TableMetadata(ks_meta, 'role_permissions')
        table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
        table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
        table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
    else:
        raise ColumnFamilyNotFound("Column family %r not found" % tablename)
    # BUG FIX: the constructed metadata was previously never returned, so
    # callers always got None.
    return table_meta
def get_index_meta(self, ksname, idxname):
    """Driver metadata for a secondary index; raises IndexNotFound."""
    if ksname is None:
        ksname = self.current_keyspace
    ksmeta = self.get_keyspace_meta(ksname)

    if idxname not in ksmeta.indexes:
        raise IndexNotFound("Index %r not found" % idxname)

    return ksmeta.indexes[idxname]

def get_view_meta(self, ksname, viewname):
    """Driver metadata for a materialized view; raises MaterializedViewNotFound."""
    if ksname is None:
        ksname = self.current_keyspace
    ksmeta = self.get_keyspace_meta(ksname)

    if viewname not in ksmeta.views:
        raise MaterializedViewNotFound("Materialized view %r not found" % viewname)

    return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
    """Resolve user-supplied `ks`/`name` (either may be missing) to
    keyspace, table, index or view metadata; raises ObjectNotFound."""
    if name is None:
        # Only one identifier given: treat it as a keyspace if it names one,
        # otherwise as an object in the current keyspace.
        if ks and ks in self.conn.metadata.keyspaces:
            return self.conn.metadata.keyspaces[ks]
        elif self.current_keyspace is None:
            raise ObjectNotFound("%r not found in keyspaces" % (ks))
        else:
            name = ks
            ks = self.current_keyspace

    if ks is None:
        ks = self.current_keyspace

    ksmeta = self.get_keyspace_meta(ks)

    # Try tables, then indexes, then materialized views.
    if name in ksmeta.tables:
        return ksmeta.tables[name]
    elif name in ksmeta.indexes:
        return ksmeta.indexes[name]
    elif name in ksmeta.views:
        return ksmeta.views[name]

    raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))

def get_usertypes_meta(self):
    data = self.session.execute("select * from system.schema_usertypes")

    if not data:
        return cql3handling.UserTypesMeta({})

    return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
    # Flatten trigger names across every table in the keyspace.
    if ksname is None:
        ksname = self.current_keyspace

    return [trigger.name
            for table in self.get_keyspace_meta(ksname).tables.values()
            for trigger in table.triggers.values()]
def reset_statement(self):
    """Discard any partially-entered statement and restore the base prompt."""
    self.reset_prompt()
    self.statement.truncate(0)
    self.empty_lines = 0

def reset_prompt(self):
    if self.current_keyspace is None:
        self.set_prompt(self.default_prompt, True)
    else:
        self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)

def set_continue_prompt(self):
    """Switch to the multi-line continuation prompt; after three empty
    continuation lines, remind the user how to terminate or cancel."""
    if self.empty_lines >= 3:
        self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
        self.empty_lines = 0
        return
    if self.current_keyspace is None:
        self.set_prompt(self.continue_prompt)
    else:
        spaces = ' ' * len(str(self.current_keyspace))
        self.set_prompt(self.keyspace_continue_prompt % spaces)
    # Count consecutive empty lines (self.lastcmd is falsy for an empty line).
    self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
@contextmanager
def prepare_loop(self):
    """Context manager around the interactive loop: install our tab
    completer on entry (handling both GNU readline and libedit binding
    syntax) and restore the previous completer on exit."""
    readline = None
    if self.tty and self.completekey:
        try:
            import readline
        except ImportError:
            if is_win:
                print "WARNING: pyreadline dependency missing. Install to enable tab completion."
            pass
        else:
            old_completer = readline.get_completer()
            readline.set_completer(self.complete)
            if readline.__doc__ is not None and 'libedit' in readline.__doc__:
                # libedit (e.g. macOS) uses its own key-binding syntax.
                readline.parse_and_bind("bind -e")
                readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
                readline.parse_and_bind("bind ^R em-inc-search-prev")
            else:
                readline.parse_and_bind(self.completekey + ": complete")
    try:
        yield
    finally:
        if readline is not None:
            readline.set_completer(old_completer)
def get_input_line(self, prompt=''):
    """Read one input line (interactive or piped), record it in
    self.lastcmd, bump the line counter, and raise EOFError at end of
    piped input."""
    if self.tty:
        try:
            self.lastcmd = raw_input(prompt).decode(self.encoding)
        except UnicodeDecodeError:
            # Keep the shell alive on undecodable input; hint at the Windows
            # codepage issue if that is the likely cause.
            self.lastcmd = ''
            traceback.print_exc()
            self.check_windows_encoding()
        line = self.lastcmd + '\n'
    else:
        self.lastcmd = self.stdin.readline()
        line = self.lastcmd
        if not len(line):
            raise EOFError
    self.lineno += 1
    return line

def use_stdin_reader(self, until='', prompt=''):
    """Generator yielding input lines up to (but excluding) the `until`
    line or end of input."""
    until += '\n'
    while True:
        try:
            newline = self.get_input_line(prompt=prompt)
        except EOFError:
            return
        if newline == until:
            return
        yield newline
def cmdloop(self):
    """
    Adapted from cmd.Cmd's version, because there is literally no way with
    cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
    input and an actual EOF.
    """
    with self.prepare_loop():
        while not self.stop:
            try:
                if self.single_statement:
                    # -e / --execute mode: run the one statement and stop.
                    line = self.single_statement
                    self.stop = True
                else:
                    line = self.get_input_line(self.prompt)
                self.statement.write(line)
                if self.onecmd(self.statement.getvalue()):
                    # Statement was complete and handled; start fresh.
                    self.reset_statement()
            except EOFError:
                self.handle_eof()
            except CQL_ERRORS, cqlerr:
                self.printerr(cqlerr.message.decode(encoding='utf-8'))
            except KeyboardInterrupt:
                self.reset_statement()
                print
def onecmd(self, statementtext):
    """
    Returns true if the statement is complete and was handled (meaning it
    can be reset).
    """
    try:
        statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
    except pylexotron.LexingError, e:
        # Report the error position; non-tty input only gets a character
        # offset, interactive input also echoes the offending line.
        if self.show_line_nums:
            self.printerr('Invalid syntax at char %d' % (e.charnum,))
        else:
            self.printerr('Invalid syntax at line %d, char %d'
                          % (e.linenum, e.charnum))
            statementline = statementtext.split('\n')[e.linenum - 1]
            self.printerr(' %s' % statementline)
            self.printerr(' %s^' % (' ' * e.charnum))
        # The bad text is consumed; treat it as handled.
        return True

    while statements and not statements[-1]:
        statements = statements[:-1]
    if not statements:
        return True

    # Last statement is unterminated: keep accumulating input.
    if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
        self.set_continue_prompt()
        return

    for st in statements:
        try:
            self.handle_statement(st, statementtext)
        except Exception, e:
            if self.debug:
                traceback.print_exc()
            else:
                self.printerr(e)
    return True
def handle_eof(self):
    """Flush any pending statement at end of input, then exit the shell."""
    if self.tty:
        print
    statement = self.statement.getvalue()
    if statement.strip():
        if not self.onecmd(statement):
            self.printerr('Incomplete statement at end of file')
    self.do_exit()
def handle_statement(self, tokens, srcstr):
    # Concat multi-line statements and insert into history
    if readline is not None:
        nl_count = srcstr.count("\n")

        new_hist = srcstr.replace("\n", " ").rstrip()

        if nl_count > 1 and self.last_hist != new_hist:
            readline.add_history(new_hist.encode(self.encoding))

        self.last_hist = new_hist
    cmdword = tokens[0][1]
    if cmdword == '?':
        cmdword = 'help'
    # Statements whose first word has a do_* handler are shell commands;
    # everything else is sent to the server as CQL.
    custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
    if custom_handler:
        parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
                                                   startsymbol='cqlshCommand')
        if parsed and not parsed.remainder:
            # successful complete parse
            return custom_handler(parsed)
        else:
            return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
    return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
    """Handle a statement our grammar could not fully parse: forward CQL
    verbs to the server anyway, report anything else as improper."""
    if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
                           'create', 'drop', 'alter', 'grant', 'revoke',
                           'batch', 'list'):
        # hey, maybe they know about some new syntax we don't. type
        # assumptions won't work, but maybe the query will.
        return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
    if parsed:
        self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
    else:
        self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
    """USE: switch the session's keyspace on success."""
    ksname = parsed.get_binding('ksname')
    success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
    if success:
        # Quoted names keep their exact case; unquoted names fold to lowercase.
        if ksname[0] == '"' and ksname[-1] == '"':
            self.current_keyspace = self.cql_unprotect_name(ksname)
        else:
            self.current_keyspace = ksname.lower()

def do_select(self, parsed):
    """SELECT: execute, with tracing suppressed for queries that target
    system_traces itself."""
    tracing_was_enabled = self.tracing_enabled
    ksname = parsed.get_binding('ksname')
    stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
    self.tracing_enabled = self.tracing_enabled and not stop_tracing
    statement = parsed.extract_orig()
    self.perform_statement(statement)
    self.tracing_enabled = tracing_was_enabled
def perform_statement(self, statement):
    """Execute one CQL statement with the session's consistency and paging
    settings, then print any server warnings and (if enabled) query traces.
    Returns True on success."""
    stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
    success, future = self.perform_simple_statement(stmt)

    if future:
        if future.warnings:
            self.print_warnings(future.warnings)

        if self.tracing_enabled:
            try:
                for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
                    print_trace(self, trace)
            except TraceUnavailable:
                # Trace rows were not (fully) available in time; show the
                # session ids so the user can inspect them later.
                msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
                self.writeresult(msg, color=RED)
                for trace_id in future.get_query_trace_ids():
                    self.show_session(trace_id, partial_session=True)
            except Exception, err:
                self.printerr("Unable to fetch query trace: %s" % (str(err),))

    return success
def parse_for_select_meta(self, query_string):
    """Best-effort lookup of the table or view metadata a SELECT targets,
    used for result formatting; returns None when the query cannot be
    parsed by our grammar."""
    try:
        parsed = cqlruleset.cql_parse(query_string)[1]
    except IndexError:
        return None
    ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
    name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
    try:
        return self.get_table_meta(ks, name)
    except ColumnFamilyNotFound:
        # Not a table; it may be a materialized view.
        try:
            return self.get_view_meta(ks, name)
        except MaterializedViewNotFound:
            raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))

def parse_for_update_meta(self, query_string):
    """Like parse_for_select_meta, but for INSERT/UPDATE targets (tables only)."""
    try:
        parsed = cqlruleset.cql_parse(query_string)[1]
    except IndexError:
        return None
    ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
    cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
    return self.get_table_meta(ks, cf)
def perform_simple_statement(self, statement):
    """Execute `statement` and print its results or error.

    Returns (success, response_future); success is False for an empty
    statement or an execution error.
    """
    if not statement:
        return False, None

    future = self.session.execute_async(statement, trace=self.tracing_enabled)
    result = None
    try:
        result = future.result()
    except CQL_ERRORS, err:
        self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
    except Exception:
        import traceback
        self.printerr(traceback.format_exc())

    # Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
    if not future.is_schema_agreed:
        try:
            self.conn.refresh_schema_metadata(5)  # will throw exception if there is a schema mismatch
        except Exception:
            self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
                          "nodes in system.local and system.peers.")
            self.conn.refresh_schema_metadata(-1)

    if result is None:
        return False, None

    # Pick metadata appropriate to the statement so results format nicely.
    if statement.query_string[:6].lower() == 'select':
        self.print_result(result, self.parse_for_select_meta(statement.query_string))
    elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
        # LIST USERS/ROLES results have the shape of system_auth.roles.
        self.print_result(result, self.get_table_meta('system_auth', 'roles'))
    elif statement.query_string.lower().startswith("list"):
        self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
    elif result:
        # CAS INSERT/UPDATE
        self.writeresult("")
        self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
    self.flush_output()
    return True, future
def print_result(self, result, table_meta):
    """Print a result set (paging interactively on a tty), followed by the
    row count and any decoding errors hit while formatting."""
    self.decoding_errors = []

    self.writeresult("")
    if result.has_more_pages and self.tty:
        num_rows = 0
        while True:
            if result.current_rows:
                num_rows += len(result.current_rows)
                self.print_static_result(result, table_meta)
            if result.has_more_pages:
                # Wait for a keypress before fetching the next page.
                raw_input("---MORE---")
                result.fetch_next_page()
            else:
                break
    else:
        num_rows = len(result.current_rows)
        self.print_static_result(result, table_meta)
    self.writeresult("(%d rows)" % num_rows)

    # Show at most two decoding errors and summarize the rest.
    if self.decoding_errors:
        for err in self.decoding_errors[:2]:
            self.writeresult(err.message(), color=RED)
        if len(self.decoding_errors) > 2:
            self.writeresult('%d more decoding errors suppressed.'
                             % (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
    """Format and print the current page of `result`, either as a table or
    (with EXPAND on) vertically."""
    if not result.column_names and not table_meta:
        return

    # Column names from the response when present, else from table metadata.
    column_names = result.column_names or table_meta.columns.keys()
    formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
    if not result.current_rows:
        # print header only
        self.print_formatted_result(formatted_names, None)
        return

    # Resolve CQL types so each value can be formatted type-appropriately.
    cql_types = []
    if result.column_types:
        ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
        ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
        cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]

    formatted_values = [map(self.myformat_value, row.values(), cql_types) for row in result.current_rows]

    if self.expand_enabled:
        self.print_formatted_result_vertically(formatted_names, formatted_values)
    else:
        self.print_formatted_result(formatted_names, formatted_values)
def print_formatted_result(self, formatted_names, formatted_values):
    """Render rows as an aligned text table; formatted_values of None
    means print the header only."""
    # determine column widths
    widths = [n.displaywidth for n in formatted_names]
    if formatted_values is not None:
        for fmtrow in formatted_values:
            for num, col in enumerate(fmtrow):
                widths[num] = max(widths[num], col.displaywidth)

    # print header
    header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
    self.writeresult(' ' + header.rstrip())
    self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))

    # stop if there are no rows
    if formatted_values is None:
        self.writeresult("")
        return

    # print row data
    for row in formatted_values:
        line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
        self.writeresult(' ' + line)

    self.writeresult("")
def print_formatted_result_vertically(self, formatted_names, formatted_values):
    """EXPAND mode: print each row as its own block of column/value pairs."""
    max_col_width = max([n.displaywidth for n in formatted_names])
    max_val_width = max([n.displaywidth for row in formatted_values for n in row])

    # for each row returned, list all the column-value pairs
    for row_id, row in enumerate(formatted_values):
        self.writeresult("@ Row %d" % (row_id + 1))
        self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
        for field_id, field in enumerate(row):
            column = formatted_names[field_id].ljust(max_col_width, color=self.color)
            value = field.ljust(field.displaywidth, color=self.color)
            self.writeresult(' ' + " | ".join([column, value]))
        self.writeresult('')
def print_warnings(self, warnings):
    """Echo server-side warnings attached to a response, if any."""
    if warnings is None or len(warnings) == 0:
        return

    self.writeresult('')
    self.writeresult('Warnings :')
    for warning in warnings:
        self.writeresult(warning)
        self.writeresult('')

def emptyline(self):
    # Unlike cmd.Cmd's default, an empty line does not repeat the last command.
    pass

def parseline(self, line):
    # this shouldn't be needed
    raise NotImplementedError
    def complete(self, text, state):
        """readline completion entry point.

        readline calls this repeatedly with increasing `state`; matches are
        computed once (state == 0) and then returned one at a time. Returns
        None when there are no more matches or readline is unavailable.
        """
        if readline is None:
            return
        if state == 0:
            try:
                self.completion_matches = self.find_completions(text)
            except Exception:
                # swallow completer errors unless debugging, so a grammar bug
                # can't crash the shell mid-keystroke
                if debug_completion:
                    import traceback
                    traceback.print_exc()
                else:
                    raise
        try:
            return self.completion_matches[state]
        except IndexError:
            return None
    def find_completions(self, text):
        """Compute CQL completions for `text` at the current cursor position.

        Joins any previously buffered statement lines with the current
        readline buffer so completion works for multi-line statements.
        """
        curline = readline.get_line_buffer()
        prevlines = self.statement.getvalue()
        wholestmt = prevlines + curline
        # offset readline's begin index into the combined statement text
        begidx = readline.get_begidx() + len(prevlines)
        stuff_to_complete = wholestmt[:begidx]
        return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
                                       debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "%s@%s" % (self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
def print_recreate_keyspace(self, ksdef, out):
out.write(ksdef.export_as_string())
out.write("\n")
def print_recreate_columnfamily(self, ksname, cfname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given table.
Writes output to the given out stream.
"""
out.write(self.get_table_meta(ksname, cfname).export_as_string())
out.write("\n")
def print_recreate_index(self, ksname, idxname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given index.
Writes output to the given out stream.
"""
out.write(self.get_index_meta(ksname, idxname).export_as_string())
out.write("\n")
def print_recreate_materialized_view(self, ksname, viewname, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given materialized view.
Writes output to the given out stream.
"""
out.write(self.get_view_meta(ksname, viewname).export_as_string())
out.write("\n")
def print_recreate_object(self, ks, name, out):
"""
Output CQL commands which should be pasteable back into a CQL session
to recreate the given object (ks, table or index).
Writes output to the given out stream.
"""
out.write(self.get_object_meta(ks, name).export_as_string())
out.write("\n")
    def describe_keyspaces(self):
        """Print the names of all keyspaces in columns, framed by blank lines."""
        print
        cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
        print
    def describe_keyspace(self, ksname):
        """Print the CQL needed to recreate keyspace `ksname` to stdout."""
        print
        self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
        print
    def describe_columnfamily(self, ksname, cfname):
        """Print the CQL needed to recreate table `ksname.cfname`.

        Falls back to the current keyspace when `ksname` is None; raises
        NoKeyspaceError if there is no current keyspace either.
        """
        if ksname is None:
            ksname = self.current_keyspace
            if ksname is None:
                raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
        print
    def describe_index(self, ksname, idxname):
        """Print the CQL needed to recreate the given index.

        NOTE(review): unlike describe_columnfamily, a None `ksname` is not
        defaulted to the current keyspace here — presumably get_index_meta
        handles that; verify against its implementation.
        """
        print
        self.print_recreate_index(ksname, idxname, sys.stdout)
        print
    def describe_materialized_view(self, ksname, viewname):
        """Print the CQL needed to recreate materialized view `ksname.viewname`.

        Falls back to the current keyspace when `ksname` is None; raises
        NoKeyspaceError if there is no current keyspace either.
        """
        if ksname is None:
            ksname = self.current_keyspace
            if ksname is None:
                raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
        print
    def describe_object(self, ks, name):
        """Print the CQL needed to recreate `name` (keyspace, table or index)."""
        print
        self.print_recreate_object(ks, name, sys.stdout)
        print
    def describe_columnfamilies(self, ksname):
        """Print table names for `ksname`, or for every keyspace when None."""
        print
        if ksname is None:
            # no keyspace given: list tables grouped under a header per keyspace
            for k in self.get_keyspaces():
                name = protect_name(k.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
                print
        else:
            cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
            print
    def describe_functions(self, ksname):
        """Print user-defined-function names for `ksname`, or for every
        keyspace (grouped under headers) when None."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.functions.keys()))
            print
    def describe_function(self, ksname, functionname):
        """Print the CQL for every overload of the named user-defined function.

        Falls back to the current keyspace when `ksname` is None; raises
        NoKeyspaceError without one, FunctionNotFound when no overload matches.
        """
        if ksname is None:
            ksname = self.current_keyspace
            if ksname is None:
                raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        # functions are keyed by (name, arg types); match on name alone to
        # collect every overload
        functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
        if len(functions) == 0:
            raise FunctionNotFound("User defined function %r not found" % functionname)
        print "\n\n".join(func.export_as_string() for func in functions)
        print
    def describe_aggregates(self, ksname):
        """Print user-defined-aggregate names for `ksname`, or for every
        keyspace (grouped under headers) when None."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.aggregates.keys()))
            print
    def describe_aggregate(self, ksname, aggregatename):
        """Print the CQL for every overload of the named user-defined aggregate.

        Falls back to the current keyspace when `ksname` is None; raises
        NoKeyspaceError without one, FunctionNotFound when no overload matches.
        """
        if ksname is None:
            ksname = self.current_keyspace
            if ksname is None:
                raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        # match on name alone to collect every overload
        aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
        if len(aggregates) == 0:
            raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
        print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
        print
    def describe_usertypes(self, ksname):
        """Print user-defined-type names for `ksname`, or for every keyspace
        (grouped under headers) when None."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
                print
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            cmd.Cmd.columnize(self, protect_names(ksmeta.user_types.keys()))
            print
    def describe_usertype(self, ksname, typename):
        """Print the CQL for the named user-defined type.

        Falls back to the current keyspace when `ksname` is None; raises
        NoKeyspaceError without one, UserTypeNotFound when the type is absent.
        """
        if ksname is None:
            ksname = self.current_keyspace
            if ksname is None:
                raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        try:
            usertype = ksmeta.user_types[typename]
        except KeyError:
            raise UserTypeNotFound("User type %r not found" % typename)
        print usertype.export_as_string()
    def describe_cluster(self):
        """Print cluster name, partitioner and — when connected to a
        non-system keyspace — the token-range ownership of the ring."""
        print '\nCluster: %s' % self.get_cluster_name()
        # strip the well-known package prefix for readability
        p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
        print 'Partitioner: %s\n' % p
        # TODO: snitch?
        # snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
        # print 'Snitch: %s\n' % snitch
        if self.current_keyspace is not None and self.current_keyspace != 'system':
            print "Range ownership:"
            ring = self.get_ring(self.current_keyspace)
            # each entry maps a token to the replica hosts owning its range
            for entry in ring.items():
                print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
            print
    def describe_schema(self, include_system=False):
        """Print recreate-CQL for every keyspace; system keyspaces are
        skipped unless `include_system` is True."""
        print
        for k in self.get_keyspaces():
            if include_system or k.name not in cql3handling.SYSTEM_KEYSPACES:
                self.print_recreate_keyspace(k, sys.stdout)
                print
    def do_describe(self, parsed):
        """
        DESCRIBE [cqlsh only]
        (DESC may be used as a shorthand.)
        Outputs information about the connected Cassandra cluster, or about
        the data objects stored in the cluster. Use in one of the following ways:
        DESCRIBE KEYSPACES
        Output the names of all keyspaces.
        DESCRIBE KEYSPACE [<keyspacename>]
        Output CQL commands that could be used to recreate the given keyspace,
        and the objects in it (such as tables, types, functions, etc.).
        In some cases, as the CQL interface matures, there will be some metadata
        about a keyspace that is not representable with CQL. That metadata will not be shown.
        The '<keyspacename>' argument may be omitted, in which case the current
        keyspace will be described.
        DESCRIBE TABLES
        Output the names of all tables in the current keyspace, or in all
        keyspaces if there is no current keyspace.
        DESCRIBE TABLE [<keyspace>.]<tablename>
        Output CQL commands that could be used to recreate the given table.
        In some cases, as above, there may be table metadata which is not
        representable and which will not be shown.
        DESCRIBE INDEX <indexname>
        Output the CQL command that could be used to recreate the given index.
        In some cases, there may be index metadata which is not representable
        and which will not be shown.
        DESCRIBE MATERIALIZED VIEW <viewname>
        Output the CQL command that could be used to recreate the given materialized view.
        In some cases, there may be materialized view metadata which is not representable
        and which will not be shown.
        DESCRIBE CLUSTER
        Output information about the connected Cassandra cluster, such as the
        cluster name, and the partitioner and snitch in use. When you are
        connected to a non-system keyspace, also shows endpoint-range
        ownership information for the Cassandra ring.
        DESCRIBE [FULL] SCHEMA
        Output CQL commands that could be used to recreate the entire (non-system) schema.
        Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
        k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
        DESCRIBE TYPES
        Output the names of all user-defined-types in the current keyspace, or in all
        keyspaces if there is no current keyspace.
        DESCRIBE TYPE [<keyspace>.]<type>
        Output the CQL command that could be used to recreate the given user-defined-type.
        DESCRIBE FUNCTIONS
        Output the names of all user-defined-functions in the current keyspace, or in all
        keyspaces if there is no current keyspace.
        DESCRIBE FUNCTION [<keyspace>.]<function>
        Output the CQL command that could be used to recreate the given user-defined-function.
        DESCRIBE AGGREGATES
        Output the names of all user-defined-aggregates in the current keyspace, or in all
        keyspaces if there is no current keyspace.
        DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
        Output the CQL command that could be used to recreate the given user-defined-aggregate.
        DESCRIBE <objname>
        Output CQL commands that could be used to recreate the entire object schema,
        where object can be either a keyspace or a table or an index or a materialized
        view (in this order).
        """
        # parsed.matched holds (tokentype, text) pairs; matched[1] is the word
        # right after DESCRIBE/DESC, which selects the sub-command.
        what = parsed.matched[1][1].lower()
        if what == 'functions':
            self.describe_functions(self.current_keyspace)
        elif what == 'function':
            ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
            self.describe_function(ksname, functionname)
        elif what == 'aggregates':
            self.describe_aggregates(self.current_keyspace)
        elif what == 'aggregate':
            ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
            self.describe_aggregate(ksname, aggregatename)
        elif what == 'keyspaces':
            self.describe_keyspaces()
        elif what == 'keyspace':
            ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
            if not ksname:
                ksname = self.current_keyspace
                if ksname is None:
                    self.printerr('Not in any keyspace.')
                    return
            self.describe_keyspace(ksname)
        elif what in ('columnfamily', 'table'):
            ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
            self.describe_columnfamily(ks, cf)
        elif what == 'index':
            ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
            self.describe_index(ks, idx)
        elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
            ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
            self.describe_materialized_view(ks, mv)
        elif what in ('columnfamilies', 'tables'):
            self.describe_columnfamilies(self.current_keyspace)
        elif what == 'types':
            self.describe_usertypes(self.current_keyspace)
        elif what == 'type':
            ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            ut = self.cql_unprotect_name(parsed.get_binding('utname'))
            self.describe_usertype(ks, ut)
        elif what == 'cluster':
            self.describe_cluster()
        elif what == 'schema':
            self.describe_schema(False)
        elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
            self.describe_schema(True)
        elif what:
            # bare DESCRIBE <objname>: resolve as table, then index, then
            # materialized view (matching the docstring's stated order)
            ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
            name = self.cql_unprotect_name(parsed.get_binding('cfname'))
            if not name:
                name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
            if not name:
                name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
            self.describe_object(ks, name)
    do_desc = do_describe
    def do_copy(self, parsed):
        r"""
        COPY [cqlsh only]
        COPY x FROM: Imports CSV data into a Cassandra table
        COPY x TO: Exports data from a Cassandra table in CSV format.
        COPY <table_name> [ ( column [, ...] ) ]
        FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
        [ WITH <option>='value' [AND ...] ];
        File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
        COPY <table_name> [ ( column [, ...] ) ]
        TO ( '<filename>' | STDOUT )
        [ WITH <option>='value' [AND ...] ];
        Available common COPY options and defaults:
        DELIMITER=','           - character that appears between records
        QUOTE='"'               - quoting character to be used to quote fields
        ESCAPE='\'              - character to appear before the QUOTE char when quoted
        HEADER=false            - whether to ignore the first line
        NULL=''                 - string that represents a null value
        DATETIMEFORMAT=         - timestamp strftime format
        '%Y-%m-%d %H:%M:%S%z'     defaults to time_format value in cqlshrc
        MAXATTEMPTS=5           - the maximum number of attempts per batch or range
        REPORTFREQUENCY=0.25    - the frequency with which we display status updates in seconds
        DECIMALSEP='.'          - the separator for decimal values
        THOUSANDSSEP=''         - the separator for thousands digit groups
        BOOLSTYLE='True,False'  - the representation for booleans, case insensitive, specify true followed by false,
        for example yes,no or 1,0
        NUMPROCESSES=n          - the number of worker processes, by default the number of cores minus one
        capped at 16
        CONFIGFILE=''           - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
        documentation) where you can specify WITH options under the following optional
        sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
        [copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
        name. Options are read from these sections, in the order specified
        above, and command line options always override options in configuration files.
        Depending on the COPY direction, only the relevant copy-from or copy-to sections
        are used. If no configfile is specified then .cqlshrc is searched instead.
        RATEFILE=''             - an optional file where to print the output statistics
        Available COPY FROM options and defaults:
        CHUNKSIZE=5000          - the size of chunks passed to worker processes
        INGESTRATE=100000       - an approximate ingest rate in rows per second
        MINBATCHSIZE=10         - the minimum size of an import batch
        MAXBATCHSIZE=20         - the maximum size of an import batch
        MAXROWS=-1              - the maximum number of rows, -1 means no maximum
        SKIPROWS=0              - the number of rows to skip
        SKIPCOLS=''             - a comma separated list of column names to skip
        MAXPARSEERRORS=-1       - the maximum global number of parsing errors, -1 means no maximum
        MAXINSERTERRORS=1000    - the maximum global number of insert errors, -1 means no maximum
        ERRFILE=''              - a file where to store all rows that could not be imported, by default this is
        import_ks_table.err where <ks> is your keyspace and <table> is your table name.
        PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
        False if you don't mind shifting data parsing to the cluster. The cluster will also
        have to compile every batch statement. For large and oversized clusters
        this will result in a faster import but for smaller clusters it may generate
        timeouts.
        TTL=3600                - the time to live in seconds, by default data will not expire
        Available COPY TO options and defaults:
        ENCODING='utf8'         - encoding for CSV output
        PAGESIZE='1000'         - the page size for fetching results
        PAGETIMEOUT=10          - the page timeout in seconds for fetching results
        BEGINTOKEN=''           - the minimum token string to consider when exporting data
        ENDTOKEN=''             - the maximum token string to consider when exporting data
        MAXREQUESTS=6           - the maximum number of requests each worker process can work on in parallel
        MAXOUTPUTSIZE='-1'      - the maximum size of the output file measured in number of lines,
        beyond this maximum the output file will be split into segments,
        -1 means unlimited.
        FLOATPRECISION=5        - the number of digits displayed after the decimal point for cql float values
        DOUBLEPRECISION=12      - the number of digits displayed after the decimal point for cql double values
        When entering CSV data on STDIN, you can use the sequence "\."
        on a line by itself to end the data input.
        """
        # resolve the target keyspace: explicit in the statement, else current
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        if ks is None:
            ks = self.current_keyspace
            if ks is None:
                raise NoKeyspaceError("Not in any keyspace.")
        table = self.cql_unprotect_name(parsed.get_binding('cfname'))
        columns = parsed.get_binding('colnames', None)
        if columns is not None:
            columns = map(self.cql_unprotect_name, columns)
        else:
            # default to all known columns
            columns = self.get_column_names(ks, table)
        # fname is None for STDIN/STDOUT
        fname = parsed.get_binding('fname', None)
        if fname is not None:
            fname = self.cql_unprotect_value(fname)
        # WITH options: lowercase names, unquoted values
        copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
        copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
        opts = dict(zip(copyoptnames, copyoptvals))
        direction = parsed.get_binding('dir').upper()
        if direction == 'FROM':
            task = ImportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
        elif direction == 'TO':
            task = ExportTask(self, ks, table, columns, fname, opts, DEFAULT_PROTOCOL_VERSION, CONFIG_FILE)
        else:
            raise SyntaxError("Unknown direction %s" % direction)
        task.run()
    def do_show(self, parsed):
        """
        SHOW [cqlsh only]
        Displays information about the current cqlsh session. Can be called in
        the following ways:
        SHOW VERSION
        Shows the version and build of the connected Cassandra instance, as
        well as the versions of the CQL spec and the Thrift protocol that
        the connected Cassandra instance understands.
        SHOW HOST
        Shows where cqlsh is currently connected.
        SHOW SESSION <sessionid>
        Pretty-prints the requested tracing session.
        """
        showwhat = parsed.get_binding('what').lower()
        if showwhat == 'version':
            self.get_connection_versions()
            self.show_version()
        elif showwhat == 'host':
            self.show_host()
        # startswith: the binding is the full 'session <id>' phrase
        elif showwhat.startswith('session'):
            session_id = parsed.get_binding('sessionid').lower()
            self.show_session(UUID(session_id))
        else:
            self.printerr('Wait, how do I show %r?' % (showwhat,))
    def do_source(self, parsed):
        """
        SOURCE [cqlsh only]
        Executes a file containing CQL statements. Gives the output for each
        statement in turn, if any, or any errors that occur along the way.
        Errors do NOT abort execution of the CQL source file.
        Usage:
        SOURCE '<file>';
        That is, the path to the file to be executed must be given inside a
        string literal. The path is interpreted relative to the current working
        directory. The tilde shorthand notation ('~/mydir') is supported for
        referring to $HOME.
        See also the --file option to cqlsh.
        """
        fname = parsed.get_binding('fname')
        fname = os.path.expanduser(self.cql_unprotect_value(fname))
        try:
            # detect the file's encoding and skip any byte-order mark
            encoding, bom_size = get_file_encoding_bomsize(fname)
            f = codecs.open(fname, 'r', encoding)
            f.seek(bom_size)
        except IOError, e:
            self.printerr('Could not open %r: %s' % (fname, e))
            return
        username = self.auth_provider.username if self.auth_provider else None
        password = self.auth_provider.password if self.auth_provider else None
        # run the file through a nested, non-tty Shell that reuses this
        # session's connection and display settings
        subshell = Shell(self.hostname, self.port, color=self.color,
                         username=username, password=password,
                         encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
                         cqlver=self.cql_version, keyspace=self.current_keyspace,
                         tracing_enabled=self.tracing_enabled,
                         display_nanotime_format=self.display_nanotime_format,
                         display_timestamp_format=self.display_timestamp_format,
                         display_date_format=self.display_date_format,
                         display_float_precision=self.display_float_precision,
                         display_double_precision=self.display_double_precision,
                         display_timezone=self.display_timezone,
                         max_trace_wait=self.max_trace_wait, ssl=self.ssl,
                         request_timeout=self.session.default_timeout,
                         connect_timeout=self.conn.connect_timeout)
        subshell.cmdloop()
        f.close()
    def do_capture(self, parsed):
        """
        CAPTURE [cqlsh only]
        Begins capturing command output and appending it to a specified file.
        Output will not be shown at the console while it is captured.
        Usage:
        CAPTURE '<file>';
        CAPTURE OFF;
        CAPTURE;
        That is, the path to the file to be appended to must be given inside a
        string literal. The path is interpreted relative to the current working
        directory. The tilde shorthand notation ('~/mydir') is supported for
        referring to $HOME.
        Only query result output is captured. Errors and output from cqlsh-only
        commands will still be shown in the cqlsh session.
        To stop capturing output and show it in the cqlsh session again, use
        CAPTURE OFF.
        To inspect the current capture configuration, use CAPTURE with no
        arguments.
        """
        fname = parsed.get_binding('fname')
        if fname is None:
            # bare CAPTURE: report the current capture state
            if self.shunted_query_out is not None:
                print "Currently capturing query output to %r." % (self.query_out.name,)
            else:
                print "Currently not capturing query output."
            return
        if fname.upper() == 'OFF':
            # restore the shunted output stream and color setting
            if self.shunted_query_out is None:
                self.printerr('Not currently capturing output.')
                return
            self.query_out.close()
            self.query_out = self.shunted_query_out
            self.color = self.shunted_color
            self.shunted_query_out = None
            del self.shunted_color
            return
        if self.shunted_query_out is not None:
            self.printerr('Already capturing output to %s. Use CAPTURE OFF'
                          ' to disable.' % (self.query_out.name,))
            return
        fname = os.path.expanduser(self.cql_unprotect_value(fname))
        try:
            f = open(fname, 'a')
        except IOError, e:
            self.printerr('Could not open %r for append: %s' % (fname, e))
            return
        # shunt the current stream aside and disable color (ANSI codes would
        # pollute the capture file)
        self.shunted_query_out = self.query_out
        self.shunted_color = self.color
        self.query_out = f
        self.color = False
        print 'Now capturing query output to %r.' % (fname,)
    def do_tracing(self, parsed):
        """
        TRACING [cqlsh]
        Enables or disables request tracing.
        TRACING ON
        Enables tracing for all further requests.
        TRACING OFF
        Disables tracing.
        TRACING
        TRACING with no arguments shows the current tracing status.
        """
        # SwitchCommand handles ON/OFF/status and returns the new state
        self.tracing_enabled = SwitchCommand("TRACING", "Tracing").execute(self.tracing_enabled, parsed, self.printerr)
    def do_expand(self, parsed):
        """
        EXPAND [cqlsh]
        Enables or disables expanded (vertical) output.
        EXPAND ON
        Enables expanded (vertical) output.
        EXPAND OFF
        Disables expanded (vertical) output.
        EXPAND
        EXPAND with no arguments shows the current value of expand setting.
        """
        # SwitchCommand handles ON/OFF/status and returns the new state
        self.expand_enabled = SwitchCommand("EXPAND", "Expanded output").execute(self.expand_enabled, parsed, self.printerr)
    def do_consistency(self, parsed):
        """
        CONSISTENCY [cqlsh only]
        Overrides default consistency level (default level is ONE).
        CONSISTENCY <level>
        Sets consistency level for future requests.
        Valid consistency levels:
        ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
        SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.
        CONSISTENCY
        CONSISTENCY with no arguments shows the current consistency level.
        """
        level = parsed.get_binding('level')
        if level is None:
            # no argument: just report the current level
            print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
            return
        # an unknown level raises KeyError from the driver's name mapping
        self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
        print 'Consistency level set to %s.' % (level.upper(),)
    def do_serial(self, parsed):
        """
        SERIAL CONSISTENCY [cqlsh only]
        Overrides serial consistency level (default level is SERIAL).
        SERIAL CONSISTENCY <level>
        Sets consistency level for future conditional updates.
        Valid consistency levels:
        SERIAL, LOCAL_SERIAL.
        SERIAL CONSISTENCY
        SERIAL CONSISTENCY with no arguments shows the current consistency level.
        """
        level = parsed.get_binding('level')
        if level is None:
            # no argument: just report the current level
            print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
            return
        # an unknown level raises KeyError from the driver's name mapping
        self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
        print 'Serial consistency level set to %s.' % (level.upper(),)
    def do_login(self, parsed):
        """
        LOGIN [cqlsh only]
        Changes login information without requiring restart.
        LOGIN <username> (<password>)
        Login using the specified username. If password is specified, it will be used
        otherwise, you will be prompted to enter.
        """
        username = parsed.get_binding('username')
        password = parsed.get_binding('password')
        if password is None:
            password = getpass.getpass()
        else:
            # strip the surrounding quote characters from the parsed literal
            password = password[1:-1]
        auth_provider = PlainTextAuthProvider(username=username, password=password)
        # build a fresh cluster connection mirroring the current one's settings
        conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
                       protocol_version=self.conn.protocol_version,
                       auth_provider=auth_provider,
                       ssl_options=self.conn.ssl_options,
                       load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                       control_connection_timeout=self.conn.connect_timeout,
                       connect_timeout=self.conn.connect_timeout)
        if self.current_keyspace:
            session = conn.connect(self.current_keyspace)
        else:
            session = conn.connect()
        # Update after we've connected in case we fail to authenticate
        self.conn = conn
        self.auth_provider = auth_provider
        self.username = username
        self.session = session
def do_exit(self, parsed=None):
"""
EXIT/QUIT [cqlsh only]
Exits cqlsh.
"""
self.stop = True
if self.owns_connection:
self.conn.shutdown()
do_quit = do_exit
    def do_clear(self, parsed):
        """
        CLEAR/CLS [cqlsh only]
        Clears the console.
        """
        import subprocess
        # pick the platform's clear-screen command ('cls' on Windows)
        subprocess.call(['clear', 'cls'][is_win], shell=True)
    do_cls = do_clear
    def do_debug(self, parsed):
        # drop into the Python debugger; deliberately has no docstring so
        # get_help_topics() keeps it out of the HELP listing
        import pdb
        pdb.set_trace()
def get_help_topics(self):
topics = [t[3:] for t in dir(self) if t.startswith('do_') and getattr(self, t, None).__doc__]
for hide_from_help in ('quit',):
topics.remove(hide_from_help)
return topics
def columnize(self, slist, *a, **kw):
return cmd.Cmd.columnize(self, sorted([u.upper() for u in slist]), *a, **kw)
    def do_help(self, parsed):
        """
        HELP [cqlsh only]
        Gives information about cqlsh commands. To see available topics,
        enter "HELP" without any arguments. To see help on a topic,
        use "HELP <topic>".
        """
        topics = parsed.get_binding('topic', ())
        if not topics:
            # bare HELP: list shell topics and CQL topics
            shell_topics = [t.upper() for t in self.get_help_topics()]
            self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
            cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
            self.print_topics("CQL help topics:", cql_topics, 15, 80)
            return
        for t in topics:
            if t.lower() in self.get_help_topics():
                # shell command: its help text is the do_* method's docstring
                doc = getattr(self, 'do_' + t.lower()).__doc__
                self.stdout.write(doc + "\n")
            elif t.lower() in cqldocs.get_help_topics():
                # CQL topic: open the online documentation anchor in a browser
                urlpart = cqldocs.get_help_topic(t)
                if urlpart is not None:
                    url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
                    # NOTE(review): _tryorder is a private webbrowser attribute
                    # used to detect "no browser available" — fragile across
                    # Python versions.
                    if len(webbrowser._tryorder) == 0:
                        self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
                    elif self.browser is not None:
                        webbrowser.get(self.browser).open_new_tab(url)
                    else:
                        webbrowser.open_new_tab(url)
            else:
                self.printerr("*** No help on %s" % (t,))
    def do_unicode(self, parsed):
        """
        Textual input/output
        When control characters, or other characters which can't be encoded
        in your current locale, are found in values of 'text' or 'ascii'
        types, it will be shown as a backslash escape. If color is enabled,
        any such backslash escapes will be shown in a different color from
        the surrounding text.
        Unicode code points in your data will be output intact, if the
        encoding for your locale is capable of decoding them. If you prefer
        that non-ascii characters be shown with Python-style "\\uABCD"
        escape sequences, invoke cqlsh with an ASCII locale (for example,
        by setting the $LANG environment variable to "C").
        """
        # intentionally a no-op: this command exists only so its docstring
        # appears as a HELP topic
    def do_paging(self, parsed):
        """
        PAGING [cqlsh]
        Enables or disables query paging.
        PAGING ON
        Enables query paging for all further queries.
        PAGING OFF
        Disables paging.
        PAGING
        PAGING with no arguments shows the current query paging status.
        """
        # also accepts PAGING <n> to enable paging with an explicit page size
        (self.use_paging, requested_page_size) = SwitchCommandWithValue(
            "PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
        if self.use_paging and requested_page_size is not None:
            self.page_size = requested_page_size
        if self.use_paging:
            print("Page size: {}".format(self.page_size))
        else:
            # disabling paging resets the size for the next time it's enabled
            self.page_size = self.default_page_size
def applycolor(self, text, color=None):
if not color or not self.color:
return text
return color + text + ANSI_RESET
    def writeresult(self, text, color=None, newline=True, out=None):
        """Write `text` to `out` (default: the query-output stream), applying
        color and encoding to the shell's configured encoding."""
        if out is None:
            out = self.query_out
        # convert Exceptions, etc to text
        if not isinstance(text, (unicode, str)):
            text = unicode(text)
        # Python 2: encode unicode to bytes before writing to the stream
        if isinstance(text, unicode):
            text = text.encode(self.encoding)
        to_write = self.applycolor(text, color) + ('\n' if newline else '')
        out.write(to_write)
    def flush_output(self):
        # flush the query-output stream (stdout or a CAPTURE file)
        self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
self.statement_error = True
if shownum is None:
shownum = self.show_line_nums
if shownum:
text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
    """Implements a boolean ON/OFF toggle command (e.g. TRACING, EXPAND).

    execute() interprets the parsed 'switch' binding and returns the new
    boolean state, printing status/feedback messages as it goes.
    """
    command = None      # command keyword, e.g. "TRACING"
    description = None  # human-readable description, e.g. "Tracing"
    def __init__(self, command, desc):
        self.command = command
        self.description = desc
    def execute(self, state, parsed, printerr):
        """Return the new state given the current `state` and parsed input.

        No argument reports the current state; ON/OFF switch it, with an
        error (via `printerr`) when the requested state is already in effect.
        """
        switch = parsed.get_binding('switch')
        if switch is None:
            # no argument: report and keep the current state
            if state:
                print "%s is currently enabled. Use %s OFF to disable" \
                      % (self.description, self.command)
            else:
                print "%s is currently disabled. Use %s ON to enable." \
                      % (self.description, self.command)
            return state
        if switch.upper() == 'ON':
            if state:
                printerr('%s is already enabled. Use %s OFF to disable.'
                         % (self.description, self.command))
                return state
            print 'Now %s is enabled' % (self.description,)
            return True
        if switch.upper() == 'OFF':
            if not state:
                printerr('%s is not enabled.' % (self.description,))
                return state
            print 'Disabled %s.' % (self.description,)
            return False
class SwitchCommandWithValue(SwitchCommand):
    """The same as SwitchCommand except it also accepts a value in place of ON.
    This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
    eg: PAGING 50 returns (True, 50)
        PAGING OFF returns (False, None)
        PAGING ON returns (True, None)
    The value_type must match for the PASSED_VALUE, otherwise it will return None.
    """
    def __init__(self, command, desc, value_type=int):
        SwitchCommand.__init__(self, command, desc)
        # converter applied to the raw switch text to extract a value
        self.value_type = value_type
    def execute(self, state, parsed, printerr):
        """Run the plain ON/OFF handling, then try to parse the switch text
        as a value; a parseable value forces the switch on."""
        on_off_state = SwitchCommand.execute(self, state, parsed, printerr)
        raw_switch = parsed.get_binding('switch')
        try:
            passed_value = self.value_type(raw_switch)
        except (ValueError, TypeError):
            # not a value (e.g. ON/OFF/None): keep the plain switch result
            return (on_off_state, None)
        return (True, passed_value)
def option_with_default(cparser_getter, section, option, default=None):
    """Fetch a config value via `cparser_getter(section, option)`, returning
    `default` when the lookup raises any ConfigParser.Error (missing
    section/option, bad value, ...)."""
    try:
        value = cparser_getter(section, option)
    except ConfigParser.Error:
        return default
    return value
def raw_option_with_default(configs, section, option, default=None):
    """
    Same (almost) as option_with_default() but won't do any string interpolation.
    Useful for config values that include '%' symbol, e.g. time format string.
    """
    try:
        value = configs.get(section, option, raw=True)
    except ConfigParser.Error:
        value = default
    return value
def should_use_color():
    """Decide whether colored output makes sense: require a tty, a capable
    $TERM, and — when `tput` is runnable — at least 8 colors."""
    if not sys.stdout.isatty():
        return False
    term = os.environ.get('TERM', '')
    if term in ('dumb', ''):
        return False
    try:
        import subprocess
        tput = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
        stdout, _ = tput.communicate()
        if int(stdout.strip()) < 8:
            return False
    except (OSError, ImportError, ValueError):
        # oh well, we tried. at least we know there's a $TERM and it's
        # not "dumb".
        pass
    return True
def read_options(cmdlineargs, environment):
    """Build the effective option set for this cqlsh run.

    Precedence: command-line flags override cqlshrc values; the CQLSH_HOST /
    CQLSH_PORT environment variables override the configured host/port; and
    positional arguments override everything for host and port.

    Returns a (options, hostname, port) tuple.
    """
    configs = ConfigParser.SafeConfigParser()
    configs.read(CONFIG_FILE)
    # Second, non-interpolating parser: needed for values that may contain a
    # literal '%' (e.g. passwords), which SafeConfigParser would mangle.
    rawconfigs = ConfigParser.RawConfigParser()
    rawconfigs.read(CONFIG_FILE)
    # Seed an optparse Values object with the config-file defaults.
    optvalues = optparse.Values()
    optvalues.username = option_with_default(configs.get, 'authentication', 'username')
    optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
    optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
    optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
    optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
                                                DEFAULT_COMPLETEKEY)
    optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
    optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
                                                    DEFAULT_TIMESTAMP_FORMAT)
    optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
                                                        DEFAULT_NANOTIME_FORMAT)
    optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
                                                    DEFAULT_DATE_FORMAT)
    optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
                                                    DEFAULT_FLOAT_PRECISION)
    optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
                                                     DEFAULT_DOUBLE_PRECISION)
    optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
    optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
                                                   DEFAULT_MAX_TRACE_WAIT)
    optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
    optvalues.debug = False
    optvalues.file = None
    optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
    optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
    optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
    optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
    optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
    optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
    optvalues.execute = None
    # Command-line flags are parsed on top of the config-file defaults.
    (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
    hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
    port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
    try:
        options.connect_timeout = int(options.connect_timeout)
    except ValueError:
        # parser.error() exits the process; the assignment below is defensive only.
        parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
        options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
    try:
        options.request_timeout = int(options.request_timeout)
    except ValueError:
        parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
        options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
    # Environment variables trump the config file...
    hostname = environment.get('CQLSH_HOST', hostname)
    port = environment.get('CQLSH_PORT', port)
    # ...and positional arguments trump the environment.
    if len(arguments) > 0:
        hostname = arguments[0]
    if len(arguments) > 1:
        port = arguments[1]
    # Non-interactive input (script file or -e) disables the interactive tty mode.
    if options.file or options.execute:
        options.tty = False
    if options.execute and not options.execute.endswith(';'):
        options.execute += ';'
    if optvalues.color in (True, False):
        options.color = optvalues.color
    else:
        # Colour was not configured explicitly: off for script files, else auto-detect.
        if options.file is not None:
            options.color = False
        else:
            options.color = should_use_color()
    if options.cqlversion is not None:
        options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
        if cqlvertup[0] < 3:
            parser.error('%r is not a supported CQL version.' % options.cqlversion)
    options.cqlmodule = cql3handling
    try:
        port = int(port)
    except ValueError:
        parser.error('%r is not a valid port number.' % port)
    return options, hostname, port
def setup_cqlruleset(cqlmodule):
    """Install the CQL syntax rule set from the chosen CQL module and extend
    it with cqlsh's own extra syntax rules and completers."""
    global cqlruleset
    cqlruleset = cqlmodule.CqlRuleSet
    cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
    # Register each (rule, term) completer function with the rule set.
    for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
        cqlruleset.completer_for(rulename, termname)(func)
    cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
    """Point the module-level `cqldocs` at the chosen CQL module's docs."""
    global cqldocs
    cqldocs = cqlmodule.cqldocs
def init_history():
    """Load readline history and configure completer delimiters.

    Treats "'" as part of a word (so quoted identifiers complete properly)
    and "." as a delimiter (so keyspace.table components complete
    separately). Does nothing when readline is unavailable.
    """
    if readline is not None:
        try:
            readline.read_history_file(HISTORY)
        except IOError:
            pass
        delims = readline.get_completer_delims()
        # Bug fix: str.replace() returns a new string; previously the result
        # was discarded, so "'" was never actually removed from the delimiters.
        delims = delims.replace("'", "")
        delims += '.'
        readline.set_completer_delims(delims)
def save_history():
    """Persist the readline history to disk, silently ignoring I/O failures
    (and doing nothing when readline is unavailable)."""
    if readline is None:
        return
    try:
        readline.write_history_file(HISTORY)
    except IOError:
        pass
def main(options, hostname, port):
    """Entry point: wire up the CQL rule set, history and the interactive
    Shell, then run the command loop until the session ends."""
    setup_cqlruleset(options.cqlmodule)
    setup_cqldocs(options.cqlmodule)
    init_history()
    csv.field_size_limit(options.field_size_limit)

    if options.file is None:
        stdin = None
    else:
        try:
            # Respect any byte-order mark at the start of the script file.
            encoding, bom_size = get_file_encoding_bomsize(options.file)
            stdin = codecs.open(options.file, 'r', encoding)
            stdin.seek(bom_size)
        except IOError, e:
            sys.exit("Can't open %r: %s" % (options.file, e))

    if options.debug:
        sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
        sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
        sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
        sys.stderr.write("Using ssl: %s\n" % (options.ssl,))

    # create timezone based on settings, environment or auto-detection
    timezone = None
    if options.timezone or 'TZ' in os.environ:
        try:
            import pytz
            if options.timezone:
                try:
                    timezone = pytz.timezone(options.timezone)
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
            # $TZ, when set, overrides the cqlshrc-configured timezone.
            if 'TZ' in os.environ:
                try:
                    timezone = pytz.timezone(os.environ['TZ'])
                except:
                    sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
        except ImportError:
            sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")

    # try auto-detect timezone if tzlocal is installed
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()
        except ImportError:
            # we silently ignore and fallback to UTC unless a custom timestamp format (which likely
            # does contain a TZ part) was specified
            if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
                sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
                                 "Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")

    try:
        shell = Shell(hostname,
                      port,
                      color=options.color,
                      username=options.username,
                      password=options.password,
                      stdin=stdin,
                      tty=options.tty,
                      completekey=options.completekey,
                      browser=options.browser,
                      cqlver=options.cqlversion,
                      keyspace=options.keyspace,
                      display_timestamp_format=options.time_format,
                      display_nanotime_format=options.nanotime_format,
                      display_date_format=options.date_format,
                      display_float_precision=options.float_precision,
                      display_double_precision=options.double_precision,
                      display_timezone=timezone,
                      max_trace_wait=options.max_trace_wait,
                      ssl=options.ssl,
                      single_statement=options.execute,
                      request_timeout=options.request_timeout,
                      connect_timeout=options.connect_timeout,
                      encoding=options.encoding)
    except KeyboardInterrupt:
        sys.exit('Connection aborted.')
    except CQL_ERRORS, e:
        sys.exit('Connection error: %s' % (e,))
    except VersionNotSupported, e:
        sys.exit('Unsupported CQL version: %s' % (e,))
    if options.debug:
        shell.debug = True

    shell.cmdloop()
    save_history()

    # In batch mode a failed statement should be reflected in the exit code.
    batch_mode = options.file or options.execute
    if batch_mode and shell.statement_error:
        sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()

if __name__ == '__main__':
    main(*read_options(sys.argv[1:], os.environ))

# vim: set ft=python et ts=4 sw=4 :
| []
| []
| [
"CQLSH_DEBUG_COMPLETION",
"XDG_DATA_DIRS",
"CQLSH_PROMPT",
"TERM",
"TZ",
"CQLSH_NO_BUNDLED"
]
| [] | ["CQLSH_DEBUG_COMPLETION", "XDG_DATA_DIRS", "CQLSH_PROMPT", "TERM", "TZ", "CQLSH_NO_BUNDLED"] | python | 6 | 0 | |
testar/src/org/testar/OutputStructure.java | /***************************************************************************************************
*
* Copyright (c) 2019 - 2021 Universitat Politecnica de Valencia - www.upv.es
* Copyright (c) 2019 - 2021 Open Universiteit - www.ou.nl
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************************************/
package org.testar;
import java.io.File;
import org.fruit.Util;
import org.fruit.monkey.ConfigTags;
import org.fruit.monkey.Main;
import org.fruit.monkey.Settings;
/**
 * Holds the per-run output folder layout (run folder, sequences, screenshots,
 * HTML reports, logs, ...) as static state, plus the helpers that compute the
 * timestamped folder names and create the directories.
 */
public class OutputStructure {

    private OutputStructure() {}

    public static final String DATE_FORMAT = "yyyy-MM-dd_HH-mm-ss";

    /** Timestamp (optionally prefixed with $HOSTNAME) that identifies the whole run. */
    public static String startOuterLoopDateString;
    /** Timestamp that identifies the current sequence inside the run. */
    public static String startInnerLoopDateString;
    /** Name (and optional version suffix) of the SUT being executed. */
    public static String executedSUTname;

    public static int sequenceInnerLoopCount;

    public static String outerLoopOutputDir;
    public static String sequencesOutputDir;
    public static String screenshotsOutputDir;
    public static String htmlOutputDir;
    public static String logsOutputDir;
    public static String debugLogsOutputDir;
    public static String processListenerDir;

    /**
     * Turns a raw "yyyy-MM-dd_HH-mm-ss" timestamp into the unit-suffixed form
     * used in folder names, e.g. "2021-01-02_03-04-05" -> "2021-01-02_03h04m05s".
     */
    private static String withTimeUnitSuffixes(String date) {
        date = date + "s";
        date = date.substring(0, 16) + "m" + date.substring(17);
        date = date.substring(0, 13) + "h" + date.substring(14);
        return date;
    }

    /**
     * Computes {@link #startOuterLoopDateString}: the current timestamp,
     * prefixed with the HOSTNAME environment variable (when set) so that runs
     * from different machines are distinguishable.
     */
    public static void calculateOuterLoopDateString() {
        startOuterLoopDateString = "";
        String date = withTimeUnitSuffixes(Util.dateString(OutputStructure.DATE_FORMAT));
        if (System.getenv("HOSTNAME") != null)
        {
            startOuterLoopDateString = System.getenv("HOSTNAME")+"_";
        }
        startOuterLoopDateString += date;
    }

    /** Computes {@link #startInnerLoopDateString} from the current timestamp. */
    public static void calculateInnerLoopDateString() {
        startInnerLoopDateString = withTimeUnitSuffixes(Util.dateString(OutputStructure.DATE_FORMAT));
    }

    /**
     * Derives {@link #executedSUTname} either from the explicit ApplicationName
     * setting or, as a fallback, from the SUT connector value (web domain,
     * .exe name or .jar name). The ApplicationVersion, when present, is
     * appended with an underscore separator.
     *
     * @param settings test settings providing the SUT connector / name / version
     */
    public static void createOutputSUTname(Settings settings) {
        executedSUTname = "";
        if(settings.get(ConfigTags.ApplicationName,"").equals("")) {
            String sutConnectorValue = settings.get(ConfigTags.SUTConnectorValue);
            sutConnectorValue = sutConnectorValue.replace("/", File.separator);
            try {
                if (sutConnectorValue.contains("http") && sutConnectorValue.contains("www.")) {
                    // Web SUT: use the domain, e.g. "www.example.com" -> "example".
                    int indexWWW = sutConnectorValue.indexOf("www.")+4;
                    int indexEnd = sutConnectorValue.indexOf(".", indexWWW);
                    executedSUTname = sutConnectorValue.substring(indexWWW, indexEnd);
                }
                else if (sutConnectorValue.contains(".exe")) {
                    // Desktop SUT: use the executable name without ".exe".
                    int startSUT = sutConnectorValue.lastIndexOf(File.separator)+1;
                    int endSUT = sutConnectorValue.indexOf(".exe");
                    executedSUTname = sutConnectorValue.substring(startSUT, endSUT);
                }
                else if (sutConnectorValue.contains(".jar")) {
                    // Java SUT: use the jar name without ".jar".
                    int startSUT = sutConnectorValue.lastIndexOf(File.separator)+1;
                    int endSUT = sutConnectorValue.indexOf(".jar");
                    executedSUTname = sutConnectorValue.substring(startSUT, endSUT);
                }
            }catch(Exception e) {
                System.out.println("Error: This run generation will be stored with empty name");
            }
        }else {
            executedSUTname = settings.get(ConfigTags.ApplicationName,"");
        }

        String version = settings.get(ConfigTags.ApplicationVersion,"");
        if(!version.isEmpty())
            executedSUTname += "_" + version;
    }

    /**
     * Creates the per-run output folder hierarchy below {@link Main#outputDir}
     * and stores the resulting paths in the corresponding static fields.
     */
    public static void createOutputFolders() {
        outerLoopOutputDir = Main.outputDir + File.separator + startOuterLoopDateString + "_" + executedSUTname;
        File runDir = new File(outerLoopOutputDir);
        runDir.mkdirs();

        //Check if main output folder was created correctly, if not use unknown name with timestamp
        // NOTE(review): when the fallback "unknown" folder is used, outerLoopOutputDir
        // still points at the original (failed) path -- confirm this is intended.
        if(!runDir.exists()) {
            runDir = new File(Main.outputDir + File.separator + startOuterLoopDateString + "_unknown");
            runDir.mkdirs();
        }

        sequencesOutputDir = ensureDir(outerLoopOutputDir + File.separator + "sequences");
        screenshotsOutputDir = ensureDir(outerLoopOutputDir + File.separator + "scrshots");
        htmlOutputDir = ensureDir(outerLoopOutputDir + File.separator + "HTMLreports");
        logsOutputDir = ensureDir(outerLoopOutputDir + File.separator + "logs");
        debugLogsOutputDir = ensureDir(logsOutputDir + File.separator + "debug");
        processListenerDir = ensureDir(logsOutputDir + File.separator + "processListener");
    }

    /** Creates the directory (and missing parents) if needed; returns the path unchanged. */
    private static String ensureDir(String path) {
        File dir = new File(path);
        if(!dir.exists())
            dir.mkdirs();
        return path;
    }
}
| [
"\"HOSTNAME\"",
"\"HOSTNAME\""
]
| []
| [
"HOSTNAME"
]
| [] | ["HOSTNAME"] | java | 1 | 0 | |
flink-yarn-tests/src/test/java/org/apache/flink/yarn/YarnTestBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.client.cli.CliFrontend;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.flink.runtime.clusterframework.BootstrapTools;
import org.apache.flink.test.util.TestBaseUtils;
import org.apache.flink.util.Preconditions;
import org.apache.flink.util.TestLogger;
import org.apache.flink.util.function.RunnableWithException;
import org.apache.flink.yarn.cli.FlinkYarnSessionCli;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.PrintStream;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.flink.util.Preconditions.checkState;
import static org.junit.Assert.assertEquals;
/**
* This base class allows to use the MiniYARNCluster.
* The cluster is re-used for all tests.
*
* <p>This class is located in a different package which is build after flink-dist. This way,
* we can use the YARN uberjar of flink to start a Flink YARN session.
*
* <p>The test is not thread-safe. Parallel execution of tests is not possible!
*/
public abstract class YarnTestBase extends TestLogger {
private static final Logger LOG = LoggerFactory.getLogger(YarnTestBase.class);

// Original std streams, captured at class load so tests can redirect and later restore them.
protected static final PrintStream ORIGINAL_STDOUT = System.out;
protected static final PrintStream ORIGINAL_STDERR = System.err;
private static final InputStream ORIGINAL_STDIN = System.in;

// Key under which the mini cluster's name is stored in the YARN configuration.
protected static final String TEST_CLUSTER_NAME_KEY = "flink-yarn-minicluster-name";

protected static final int NUM_NODEMANAGERS = 2;

/** The tests are scanning for these strings in the final output. */
protected static final String[] PROHIBITED_STRINGS = {
    "Exception", // we don't want any exceptions to happen
    "Started [email protected]:8081" // Jetty should start on a random port in YARN mode.
};

/** These strings are white-listed, overriding the prohibited strings. */
protected static final String[] WHITELISTED_STRINGS = {
    "akka.remote.RemoteTransportExceptionNoStackTrace",
    // workaround for annoying InterruptedException logging:
    // https://issues.apache.org/jira/browse/YARN-1022
    "java.lang.InterruptedException",
    // very specific on purpose
    "Remote connection to [null] failed with java.net.ConnectException: Connection refused",
    "Remote connection to [null] failed with java.nio.channels.NotYetConnectedException",
    "java.io.IOException: Connection reset by peer",

    // filter out expected ResourceManagerException caused by intended shutdown request
    YarnResourceManager.ERROR_MASSAGE_ON_SHUTDOWN_REQUEST,

    // this can happen in Akka 2.4 on shutdown.
    "java.util.concurrent.RejectedExecutionException: Worker has already been shutdown",

    "org.apache.flink.util.FlinkException: Stopping JobMaster",
    "org.apache.flink.util.FlinkException: JobManager is shutting down.",
    "lost the leadership."
};

// Temp directory which is deleted after the unit test.
@ClassRule
public static TemporaryFolder tmp = new TemporaryFolder();

// Temp directory for mini hdfs
@ClassRule
public static TemporaryFolder tmpHDFS = new TemporaryFolder();

// The YARN/HDFS mini clusters are shared across all tests of a class.
protected static MiniYARNCluster yarnCluster = null;

protected static MiniDFSCluster miniDFSCluster = null;

/**
 * Uberjar (fat jar) file of Flink.
 */
protected static File flinkUberjar;

protected static final YarnConfiguration YARN_CONFIGURATION;

/**
 * lib/ folder of the flink distribution.
 */
protected static File flinkLibFolder;

/**
 * Temporary folder where Flink configurations will be kept for secure run.
 */
protected static File tempConfPathForSecureRun = null;

protected static File yarnSiteXML = null;
protected static File hdfsSiteXML = null;

// Per-test-instance YARN client; created lazily in setupYarnClient().
private YarnClient yarnClient = null;

private static org.apache.flink.configuration.Configuration globalConfiguration;

// Per-test copy of the global Flink configuration (reset before each test).
protected org.apache.flink.configuration.Configuration flinkConfiguration;

static {
    // Shared YARN configuration for the mini cluster used by all tests.
    YARN_CONFIGURATION = new YarnConfiguration();
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 32);
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 4096); // 4096 is the available memory anyways
    YARN_CONFIGURATION.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2);
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 4);
    YARN_CONFIGURATION.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);
    YARN_CONFIGURATION.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    YARN_CONFIGURATION.setInt(YarnConfiguration.NM_VCORES, 666); // memory is overwritten in the MiniYARNCluster.
    // so we have to change the number of cores for testing.
    YARN_CONFIGURATION.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 20000); // 20 seconds expiry (to ensure we properly heartbeat with YARN).
    YARN_CONFIGURATION.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 99.0F);
    YARN_CONFIGURATION.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, getYarnClasspath());
}
/**
 * Searches for the yarn.classpath file generated by the "dependency:build-classpath"
 * maven plugin in "flink-yarn".
 *
 * @return a classpath suitable for running all YARN-launched JVMs
 */
private static String getYarnClasspath() {
    final String searchRoot = "../flink-yarn-tests";
    try {
        final File classPathFile = findFile(searchRoot, (dir, name) -> name.equals("yarn.classpath"));
        // A missing file yields null and thus an NPE here; that is intentionally fatal.
        return FileUtils.readFileToString(classPathFile);
    } catch (Throwable t) {
        LOG.error("Error while getting YARN classpath in {}", new File(searchRoot).getAbsoluteFile(), t);
        throw new RuntimeException("Error while getting YARN classpath", t);
    }
}
/**
 * Switches the given Hadoop configuration to Kerberos security and points
 * every YARN daemon (RM / NM and their SPNEGO web endpoints) at the same
 * test principal and keytab.
 */
public static void populateYarnSecureConfigurations(Configuration conf, String principal, String keytab) {
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");

    for (String keytabKey : new String[] {
            YarnConfiguration.RM_KEYTAB,
            YarnConfiguration.NM_KEYTAB,
            YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
            YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY}) {
        conf.set(keytabKey, keytab);
    }

    for (String principalKey : new String[] {
            YarnConfiguration.RM_PRINCIPAL,
            YarnConfiguration.NM_PRINCIPAL,
            YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
            YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY}) {
        conf.set(principalKey, principal);
    }

    conf.set("hadoop.security.auth_to_local", "RULE:[1:$1] RULE:[2:$1]");
}
@Before
public void setupYarnClient() {
    // Lazily create one YarnClient; re-used for subsequent tests of this instance.
    if (yarnClient == null) {
        yarnClient = YarnClient.createYarnClient();
        yarnClient.init(getYarnConfiguration());
        yarnClient.start();
    }

    // Give each test a private copy of the global Flink configuration.
    flinkConfiguration = new org.apache.flink.configuration.Configuration(globalConfiguration);
}
/**
 * Stops the per-test {@link YarnClient} after each test (the YARN mini
 * cluster itself is re-used across tests).
 */
@After
public void shutdownYarnClient() {
    yarnClient.stop();
}
/**
 * Runs the given test body and always performs the YARN application cleanup
 * afterwards; try-with-resources attaches cleanup failures as suppressed
 * exceptions instead of masking the test failure.
 */
protected void runTest(RunnableWithException test) throws Exception {
    // wrapping the cleanup logic in an AutoClosable automatically suppresses additional exceptions
    try (final CleanupYarnApplication ignored = new CleanupYarnApplication()) {
        test.run();
    }
}
/**
 * On close, waits (up to 10 seconds) for all YARN applications to reach a
 * terminal state and fails the test if any application is still running.
 */
private class CleanupYarnApplication implements AutoCloseable {
    @Override
    public void close() throws Exception {
        final Deadline deadline = Deadline.now().plus(Duration.ofSeconds(10));

        // Poll until every application is terminal or the deadline expires.
        boolean isAnyJobRunning = hasRunningApplications();
        while (deadline.hasTimeLeft() && isAnyJobRunning) {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Assert.fail("Should not happen");
            }
            isAnyJobRunning = hasRunningApplications();
        }

        if (isAnyJobRunning) {
            // Re-query once more for the failure message; apps may have
            // terminated between the last poll and this listing.
            final List<String> runningApps = yarnClient.getApplications().stream()
                .filter(YarnTestBase::isApplicationRunning)
                .map(app -> "App " + app.getApplicationId() + " is in state " + app.getYarnApplicationState() + '.')
                .collect(Collectors.toList());
            if (!runningApps.isEmpty()) {
                Assert.fail("There is at least one application on the cluster that is not finished." + runningApps);
            }
        }
    }

    /** Queries the YARN client for any application that is not yet terminal. */
    private boolean hasRunningApplications() throws IOException, YarnException {
        return yarnClient.getApplications().stream().anyMatch(YarnTestBase::isApplicationRunning);
    }
}
/**
 * Returns true while the given application has not reached a terminal state
 * (FINISHED, KILLED or FAILED).
 */
private static boolean isApplicationRunning(ApplicationReport app) {
    // Fetch the state once and reuse it; the original fetched it into a local
    // but then called the getter two more times.
    final YarnApplicationState state = app.getYarnApplicationState();
    return state != YarnApplicationState.FINISHED
        && state != YarnApplicationState.KILLED
        && state != YarnApplicationState.FAILED;
}
/** @return the per-test {@link YarnClient}, or null before {@link #setupYarnClient()} has run. */
@Nullable
protected YarnClient getYarnClient() {
    return yarnClient;
}
/** @return the shared YARN configuration used by the mini cluster. */
protected static YarnConfiguration getYarnConfiguration() {
    return YARN_CONFIGURATION;
}
/**
 * Recursively searches the tree below {@code startAt} (depth-first) and
 * returns the first regular file accepted by the given filter, or null when
 * nothing matches or the start path cannot be listed.
 */
public static File findFile(String startAt, FilenameFilter fnf) {
    final String[] entries = new File(startAt).list();
    if (entries == null) {
        return null;
    }
    for (String entry : entries) {
        final File candidate = new File(startAt + File.separator + entry);
        if (candidate.isDirectory()) {
            final File match = findFile(candidate.getAbsolutePath(), fnf);
            if (match != null) {
                return match;
            }
        } else if (fnf.accept(candidate.getParentFile(), candidate.getName())) {
            return candidate;
        }
    }
    return null;
}
/**
 * Creates a cluster descriptor for the mini cluster that additionally ships
 * Flink's lib/ folder to the cluster.
 */
@Nonnull
YarnClusterDescriptor createYarnClusterDescriptor(org.apache.flink.configuration.Configuration flinkConfiguration) {
    final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptorWithoutLibDir(flinkConfiguration);
    yarnClusterDescriptor.addShipFiles(Collections.singletonList(flinkLibFolder));
    return yarnClusterDescriptor;
}
/**
 * Creates a cluster descriptor wired to the test's secure config path, the
 * shared YARN configuration and the per-test YARN client, using the Flink
 * uberjar as the local jar — without shipping the lib/ folder.
 */
YarnClusterDescriptor createYarnClusterDescriptorWithoutLibDir(org.apache.flink.configuration.Configuration flinkConfiguration) {
    final YarnClusterDescriptor yarnClusterDescriptor = YarnTestUtils.createClusterDescriptorWithLogging(
        tempConfPathForSecureRun.getAbsolutePath(),
        flinkConfiguration,
        YARN_CONFIGURATION,
        yarnClient,
        true);
    yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.toURI()));
    return yarnClusterDescriptor;
}
/**
 * Filter to find root dir of the flink-yarn dist.
 */
public static class RootDirFilenameFilter implements FilenameFilter {
    @Override
    public boolean accept(File dir, String name) {
        // Accepts "flink-dist*.jar" files located under a ".../lib" directory.
        // NOTE(review): the hard-coded "/lib" assumes forward-slash paths;
        // verify behavior on Windows where File.separator is '\'.
        return name.startsWith("flink-dist") && name.endsWith(".jar") && dir.toString().contains("/lib");
    }
}
/**
 * A simple {@link FilenameFilter} that only accepts files if their name contains every string in the array passed
 * to the constructor. Optionally, matches whose directory path contains a
 * given substring are rejected.
 */
public static class ContainsName implements FilenameFilter {
    private String[] names;
    /** When non-null, reject any match whose directory path contains this substring. */
    private String excludeInPath = null;

    /**
     * @param names which have to be included in the filename.
     */
    public ContainsName(String[] names) {
        this.names = names;
    }

    public ContainsName(String[] names, String excludeInPath) {
        this.names = names;
        this.excludeInPath = excludeInPath;
    }

    @Override
    public boolean accept(File dir, String name) {
        // Every required substring must occur in the file name. (The original
        // duplicated this loop verbatim in both branches of an if/else.)
        for (String n : names) {
            if (!name.contains(n)) {
                return false;
            }
        }
        // Optionally reject matches below an excluded directory.
        return excludeInPath == null || !dir.toString().contains(excludeInPath);
    }
}
// write yarn-site.xml to target/test-classes so that flink pick can pick up this when
// initializing YarnClient properly from classpath
public static void writeYarnSiteConfigXML(Configuration yarnConf, File targetFolder) throws IOException {
    // Remember the file location in the static field for later inspection/cleanup.
    yarnSiteXML = new File(targetFolder, "/yarn-site.xml");
    try (FileWriter writer = new FileWriter(yarnSiteXML)) {
        yarnConf.writeXml(writer);
        writer.flush();
    }
}
/** Writes the given Hadoop configuration as hdfs-site.xml into the target folder. */
private static void writeHDFSSiteConfigXML(Configuration coreSite, File targetFolder) throws IOException {
    // Remember the file location in the static field for later inspection/cleanup.
    hdfsSiteXML = new File(targetFolder, "/hdfs-site.xml");
    try (FileWriter writer = new FileWriter(hdfsSiteXML)) {
        coreSite.writeXml(writer);
        writer.flush();
    }
}
/**
 * This method checks the written TaskManager and JobManager log files
 * for exceptions.
 *
 * <p>WARN: Please make sure the tool doesn't find old logfiles from previous test runs.
 * So always run "mvn clean" before running the tests here.
 *
 * @param prohibited strings that must not occur in any log file
 * @param whitelisted strings that override a prohibited match on the same line
 */
public static void ensureNoProhibitedStringInLogFiles(final String[] prohibited, final String[] whitelisted) {
    File cwd = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
    Assert.assertTrue("Expecting directory " + cwd.getAbsolutePath() + " to exist", cwd.exists());
    Assert.assertTrue("Expecting directory " + cwd.getAbsolutePath() + " to be a directory", cwd.isDirectory());

    List<String> prohibitedExcerpts = new ArrayList<>();
    File foundFile = findFile(cwd.getAbsolutePath(), new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            // scan each file for prohibited strings.
            File f = new File(dir.getAbsolutePath() + "/" + name);
            try {
                // Keep a small backlog of lines so the excerpt has context.
                BufferingScanner scanner = new BufferingScanner(new Scanner(f), 10);
                while (scanner.hasNextLine()) {
                    final String lineFromFile = scanner.nextLine();
                    for (String aProhibited : prohibited) {
                        if (lineFromFile.contains(aProhibited)) {

                            boolean whitelistedFound = false;
                            for (String white : whitelisted) {
                                if (lineFromFile.contains(white)) {
                                    whitelistedFound = true;
                                    break;
                                }
                            }

                            if (!whitelistedFound) {
                                // logging in FATAL to see the actual message in TRAVIS tests.
                                Marker fatal = MarkerFactory.getMarker("FATAL");
                                LOG.error(fatal, "Prohibited String '{}' in '{}:{}'", aProhibited, f.getAbsolutePath(), lineFromFile);

                                StringBuilder logExcerpt = new StringBuilder();
                                logExcerpt.append(System.lineSeparator());

                                // include some previous lines in case of irregular formatting
                                for (String previousLine : scanner.getPreviousLines()) {
                                    logExcerpt.append(previousLine);
                                    logExcerpt.append(System.lineSeparator());
                                }

                                logExcerpt.append(lineFromFile);
                                logExcerpt.append(System.lineSeparator());
                                // extract potential stack trace from log
                                while (scanner.hasNextLine()) {
                                    String line = scanner.nextLine();
                                    logExcerpt.append(line);
                                    logExcerpt.append(System.lineSeparator());
                                    if (line.isEmpty() || (!Character.isWhitespace(line.charAt(0)) && !line.startsWith("Caused by"))) {
                                        // the cause has been printed, now add a few more lines in case of irregular formatting
                                        for (int x = 0; x < 10 && scanner.hasNextLine(); x++) {
                                            logExcerpt.append(scanner.nextLine());
                                            logExcerpt.append(System.lineSeparator());
                                        }
                                        break;
                                    }
                                }
                                prohibitedExcerpts.add(logExcerpt.toString());

                                return true;
                            }
                        }
                    }
                }
            } catch (FileNotFoundException e) {
                LOG.warn("Unable to locate file: " + e.getMessage() + " file: " + f.getAbsolutePath());
            }

            return false;
        }
    });
    if (foundFile != null) {
        LOG.warn("Found a file with a prohibited string. Printing contents:");
        // Fix: try-with-resources closes the Scanner (and its file handle);
        // the previous implementation never closed it.
        try (Scanner scanner = new Scanner(foundFile)) {
            while (scanner.hasNextLine()) {
                LOG.warn("LINE: " + scanner.nextLine());
            }
        } catch (FileNotFoundException e) {
            Assert.fail("Unable to locate file: " + e.getMessage() + " file: " + foundFile.getAbsolutePath());
        }
        Assert.fail(
            "Found a file " + foundFile + " with a prohibited string (one of " + Arrays.toString(prohibited) + "). " +
            "Excerpts:" + System.lineSeparator() + prohibitedExcerpts);
    }
}
/**
 * Searches the test cluster's log directory for a single file that contains every
 * string from {@code mustHave} (each may appear on any line of that file).
 *
 * @param mustHave strings that must all be found within one file
 * @param fileName if non-null, only files with exactly this name are scanned
 * @return true if one file containing all required strings was found
 */
public static boolean verifyStringsInNamedLogFiles(
		final String[] mustHave, final String fileName) {
	final List<String> required = Arrays.asList(mustHave);
	final File clusterDir = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	if (!clusterDir.exists() || !clusterDir.isDirectory()) {
		return false;
	}
	final File match = findFile(clusterDir.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			if (fileName != null && !name.equals(fileName)) {
				// a specific file name was requested and this is not it
				return false;
			}
			final File candidate = new File(dir.getAbsolutePath() + "/" + name);
			LOG.info("Searching in {}", candidate.getAbsolutePath());
			try (Scanner scanner = new Scanner(candidate)) {
				final Set<String> seen = new HashSet<>(mustHave.length);
				while (scanner.hasNextLine()) {
					final String line = scanner.nextLine();
					for (String needle : mustHave) {
						if (line.contains(needle)) {
							seen.add(needle);
						}
					}
					// stop scanning as soon as every required string was seen
					if (seen.containsAll(required)) {
						return true;
					}
				}
			} catch (FileNotFoundException e) {
				LOG.warn("Unable to locate file: " + e.getMessage() + " file: " + candidate.getAbsolutePath());
			}
			return false;
		}
	});
	if (match == null) {
		return false;
	}
	LOG.info("Found string {} in {}.", Arrays.toString(mustHave), match.getAbsolutePath());
	return true;
}
/**
 * Checks that the credential file written for the given container contains all of
 * the expected delegation-token kinds.
 *
 * @param tokens token kinds (string form of {@code Token#getKind()}) that must be present
 * @param containerId the container whose {@code <containerId>.tokens} file is inspected
 * @return true if the file was found and contains all requested token kinds
 * @throws IOException if the token storage file cannot be read
 */
public static boolean verifyTokenKindInContainerCredentials(final Collection<String> tokens, final String containerId)
	throws IOException {
	final File clusterDir = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	if (!clusterDir.exists() || !clusterDir.isDirectory()) {
		return false;
	}
	final File tokenFile = findFile(clusterDir.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			return name.equals(containerId + ".tokens");
		}
	});
	if (tokenFile == null) {
		LOG.warn("Unable to find credential file for container {}", containerId);
		return false;
	}
	LOG.info("Verifying tokens in {}", tokenFile.getAbsolutePath());
	final Credentials credentials = Credentials.readTokenStorageFile(tokenFile, new Configuration());
	final Set<String> kinds = new HashSet<>(4);
	for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
		kinds.add(token.getKind().toString());
	}
	return kinds.containsAll(tokens);
}
/**
 * Returns the id of the container that produced the log file with the given name.
 * The container id is taken from the name of the log file's parent directory.
 *
 * @param logName name of the log file to look for
 * @return the name of the directory containing the log, i.e. the container id
 * @throws IllegalStateException if no log file with that name exists
 */
public static String getContainerIdByLogName(String logName) {
	final File clusterDir = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	final File log = findFile(clusterDir.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			return name.equals(logName);
		}
	});
	if (log == null) {
		throw new IllegalStateException("No container has log named " + logName);
	}
	return log.getParentFile().getName();
}
/**
 * Sleeps for the given number of milliseconds.
 *
 * @param time sleep duration in milliseconds
 */
public static void sleep(int time) {
	try {
		Thread.sleep(time);
	} catch (InterruptedException e) {
		// BUGFIX: re-assert the interrupt flag instead of swallowing it, so callers
		// (and outer polling loops) can still observe a requested interruption.
		Thread.currentThread().interrupt();
		// also fixes the "Interruped" typo in the original log message
		LOG.warn("Interrupted", e);
	}
}
/**
 * Counts the containers currently known to all NodeManagers of the mini cluster.
 *
 * @return the total number of containers across all NodeManagers
 */
public static int getRunningContainers() {
	int total = 0;
	for (int id = 0; id < NUM_NODEMANAGERS; id++) {
		final NodeManager nodeManager = yarnCluster.getNodeManager(id);
		total += nodeManager.getNMContext().getContainers().size();
	}
	return total;
}
/**
 * Fetches the report of the single RUNNING application, asserting that exactly one
 * such application exists.
 *
 * @return the report of the only running application
 */
protected ApplicationReport getOnlyApplicationReport() throws IOException, YarnException {
	final YarnClient client = getYarnClient();
	checkState(client != null);
	final List<ApplicationReport> running = client.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
	// exactly one application is expected to be running at this point
	assertEquals(1, running.size());
	return running.get(0);
}
/**
 * Starts the YARN mini cluster in secure (Kerberos) mode with the given
 * principal/keytab; no MiniDFSCluster is started.
 */
public static void startYARNSecureMode(YarnConfiguration conf, String principal, String keytab) {
	start(conf, principal, keytab, false);
}
/**
 * Starts the YARN mini cluster without security and without a MiniDFSCluster.
 */
public static void startYARNWithConfig(YarnConfiguration conf) {
	startYARNWithConfig(conf, false);
}
/**
 * Starts the YARN mini cluster without security, optionally also starting a
 * MiniDFSCluster.
 *
 * @param withDFS whether to also start a MiniDFSCluster
 */
public static void startYARNWithConfig(YarnConfiguration conf, boolean withDFS) {
	start(conf, null, null, withDFS);
}
/**
 * Starts the MiniYARNCluster (and optionally a MiniDFSCluster), locates the Flink
 * uberjar and lib folder, copies the flink-conf directory into a temporary
 * workspace, and injects the environment variables (FLINK_CONF_DIR, YARN_CONF_DIR,
 * IN_TESTS) that the code under test reads. Fails the test on any setup problem.
 *
 * @param conf YARN configuration used to initialize the mini cluster
 * @param principal Kerberos principal for secure mode, or null
 *                  (NOTE(review): not referenced in this method body — presumably
 *                  consumed via the written configuration elsewhere; confirm)
 * @param keytab Kerberos keytab for secure mode, or null (same note as principal)
 * @param withDFS whether to also start a MiniDFSCluster
 */
private static void start(YarnConfiguration conf, String principal, String keytab, boolean withDFS) {
	// set the home directory to a temp directory. Flink on YARN uses the home dir to distribute files
	File homeDir = null;
	try {
		homeDir = tmp.newFolder();
	} catch (IOException e) {
		e.printStackTrace();
		Assert.fail(e.getMessage());
	}
	System.setProperty("user.home", homeDir.getAbsolutePath());
	String uberjarStartLoc = "..";
	LOG.info("Trying to locate uberjar in {}", new File(uberjarStartLoc).getAbsolutePath());
	flinkUberjar = findFile(uberjarStartLoc, new RootDirFilenameFilter());
	Assert.assertNotNull("Flink uberjar not found", flinkUberjar);
	String flinkDistRootDir = flinkUberjar.getParentFile().getParent();
	flinkLibFolder = flinkUberjar.getParentFile(); // the uberjar is located in lib/
	Assert.assertNotNull("Flink flinkLibFolder not found", flinkLibFolder);
	Assert.assertTrue("lib folder not found", flinkLibFolder.exists());
	Assert.assertTrue("lib folder not found", flinkLibFolder.isDirectory());
	if (!flinkUberjar.exists()) {
		Assert.fail("Unable to locate yarn-uberjar.jar");
	}
	try {
		LOG.info("Starting up MiniYARNCluster");
		// the cluster is shared across tests in the class; only create it once
		if (yarnCluster == null) {
			final String testName = conf.get(YarnTestBase.TEST_CLUSTER_NAME_KEY);
			yarnCluster = new MiniYARNCluster(
				testName == null ? "YarnTest_" + UUID.randomUUID() : testName,
				NUM_NODEMANAGERS,
				1,
				1);
			yarnCluster.init(conf);
			yarnCluster.start();
		}
		Map<String, String> map = new HashMap<String, String>(System.getenv());
		File flinkConfDirPath = findFile(flinkDistRootDir, new ContainsName(new String[]{"flink-conf.yaml"}));
		Assert.assertNotNull(flinkConfDirPath);
		final String confDirPath = flinkConfDirPath.getParentFile().getAbsolutePath();
		globalConfiguration = GlobalConfiguration.loadConfiguration(confDirPath);
		// copy conf dir to the test's temporary workspace so tests can mutate it safely
		tempConfPathForSecureRun = tmp.newFolder("conf");
		FileUtils.copyDirectory(new File(confDirPath), tempConfPathForSecureRun);
		BootstrapTools.writeConfiguration(
			globalConfiguration,
			new File(tempConfPathForSecureRun, "flink-conf.yaml"));
		String configDir = tempConfPathForSecureRun.getAbsolutePath();
		LOG.info("Temporary Flink configuration directory to be used for secure test: {}", configDir);
		Assert.assertNotNull(configDir);
		map.put(ConfigConstants.ENV_FLINK_CONF_DIR, configDir);
		File targetTestClassesFolder = new File("target/test-classes");
		writeYarnSiteConfigXML(conf, targetTestClassesFolder);
		if (withDFS) {
			LOG.info("Starting up MiniDFSCluster");
			setMiniDFSCluster(targetTestClassesFolder);
		}
		map.put("IN_TESTS", "yes we are in tests"); // see YarnClusterDescriptor() for more infos
		map.put("YARN_CONF_DIR", targetTestClassesFolder.getAbsolutePath());
		TestBaseUtils.setEnv(map);
		Assert.assertTrue(yarnCluster.getServiceState() == Service.STATE.STARTED);
		// wait for the nodeManagers to connect
		while (!yarnCluster.waitForNodeManagersToConnect(500)) {
			LOG.info("Waiting for Nodemanagers to connect");
		}
	} catch (Exception ex) {
		ex.printStackTrace();
		LOG.error("setup failure", ex);
		Assert.fail();
	}
}
/**
 * Lazily starts a 2-datanode MiniDFSCluster rooted in the {@code tmpHDFS} folder,
 * writes its effective configuration as an hdfs-site.xml into the given folder, and
 * merges that configuration into {@code YARN_CONFIGURATION}. No-op when a cluster
 * is already running.
 *
 * @param targetTestClassesFolder folder into which the hdfs-site config is written
 */
private static void setMiniDFSCluster(File targetTestClassesFolder) throws Exception {
	if (miniDFSCluster == null) {
		Configuration hdfsConfiguration = new Configuration();
		hdfsConfiguration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpHDFS.getRoot().getAbsolutePath());
		miniDFSCluster = new MiniDFSCluster
			.Builder(hdfsConfiguration)
			.numDataNodes(2)
			.build();
		miniDFSCluster.waitClusterUp();
		// re-read the configuration from the running cluster: it now contains the
		// actual NameNode address/ports chosen at startup
		hdfsConfiguration = miniDFSCluster.getConfiguration(0);
		writeHDFSSiteConfigXML(hdfsConfiguration, targetTestClassesFolder);
		YARN_CONFIGURATION.addResource(hdfsConfiguration);
	}
}
/**
 * Default {@code @BeforeClass} implementation: starts the YARN mini cluster with
 * the shared {@code YARN_CONFIGURATION} and without DFS. Override in subclasses to
 * pass a different configuration.
 */
@BeforeClass
public static void setup() throws Exception {
	startYARNWithConfig(YARN_CONFIGURATION, false);
}
// -------------------------- Runner -------------------------- //
// Capture buffers installed via System.setOut/System.setErr while a Runner executes;
// restored and flushed to the logger by resetStreamsAndSendOutput().
protected static ByteArrayOutputStream outContent;
protected static ByteArrayOutputStream errContent;
/**
 * Which command-line frontend a {@code Runner} drives: the YARN session CLI
 * ({@code FlinkYarnSessionCli}) or the regular CLI frontend ({@code CliFrontend}).
 */
enum RunTypes {
	YARN_SESSION, CLI_FRONTEND
}
/**
 * Starts a {@link Runner} thread with the given arguments and returns once
 * {@code startedAfterString} has been observed on the captured stdout or stderr.
 * Redirects System.out/err into capture buffers and System.in to a pipe the test
 * can write to via the runner. Fails the test if the runner dies or the string
 * does not appear within 60 seconds.
 *
 * @param args command line arguments for the runner
 * @param startedAfterString string whose appearance on stdout/stderr signals successful startup
 * @param type which CLI frontend the runner should drive
 * @return the still-running runner thread (never returns null in practice: the
 *         trailing return only exists because Assert.fail is not recognized as terminating)
 */
protected Runner startWithArgs(String[] args, String startedAfterString, RunTypes type) throws IOException {
	LOG.info("Running with args {}", Arrays.toString(args));
	outContent = new ByteArrayOutputStream();
	errContent = new ByteArrayOutputStream();
	// pipe so the test can feed stdin ("stop") to the CLI under test
	PipedOutputStream out = new PipedOutputStream();
	PipedInputStream in = new PipedInputStream(out);
	PrintStream stdinPrintStream = new PrintStream(out);
	System.setOut(new PrintStream(outContent));
	System.setErr(new PrintStream(errContent));
	System.setIn(in);
	final int startTimeoutSeconds = 60;
	Runner runner = new Runner(
		args,
		flinkConfiguration,
		CliFrontend.getConfigurationDirectoryFromEnv(),
		type,
		0,
		stdinPrintStream);
	runner.setName("Frontend (CLI/YARN Client) runner thread (startWithArgs()).");
	runner.start();
	for (int second = 0; second < startTimeoutSeconds; second++) {
		sleep(1000);
		// check output for correct TaskManager startup.
		if (outContent.toString().contains(startedAfterString)
			|| errContent.toString().contains(startedAfterString)) {
			LOG.info("Found expected output in redirected streams");
			return runner;
		}
		// check if thread died
		if (!runner.isAlive()) {
			resetStreamsAndSendOutput();
			if (runner.getRunnerError() != null) {
				throw new RuntimeException("Runner failed with exception.", runner.getRunnerError());
			}
			Assert.fail("Runner thread died before the test was finished.");
		}
	}
	resetStreamsAndSendOutput();
	Assert.fail("During the timeout period of " + startTimeoutSeconds + " seconds the " +
		"expected string did not show up");
	return null;
}
/**
 * Convenience overload that runs without inspecting any logging events
 * (supplies an empty log-message collection).
 */
protected void runWithArgs(String[] args, String terminateAfterString, String[] failOnStrings, RunTypes type, int returnCode) throws IOException {
	runWithArgs(args, terminateAfterString, failOnStrings, type, returnCode, Collections::emptyList);
}
/**
 * The test has been passed once the "terminateAfterString" has been seen.
 *
 * <p>Polls the captured stdout/stderr (and the supplied log messages) once per
 * second for up to three minutes. When the terminate string appears, a "stop"
 * command is sent to the CLI and the runner is joined; when a fail pattern
 * matches, the runner is stopped and the test fails immediately.
 *
 * @param args Command line arguments for the runner
 * @param terminateAfterString the runner is searching the stdout and stderr for this string. as soon as it appears, the test has passed
 * @param failOnPatterns The runner is searching stdout and stderr for the pattern (regexp) specified here. If one appears, the test has failed
 * @param type Set the type of the runner
 * @param expectedReturnValue Expected return code from the runner.
 * @param logMessageSupplier Supplier for log messages
 */
protected void runWithArgs(String[] args, String terminateAfterString, String[] failOnPatterns, RunTypes type, int expectedReturnValue, Supplier<Collection<String>> logMessageSupplier) throws IOException {
	LOG.info("Running with args {}", Arrays.toString(args));
	outContent = new ByteArrayOutputStream();
	errContent = new ByteArrayOutputStream();
	// pipe so this method can feed stdin ("stop") to the CLI under test
	PipedOutputStream out = new PipedOutputStream();
	PipedInputStream in = new PipedInputStream(out);
	PrintStream stdinPrintStream = new PrintStream(out);
	System.setOut(new PrintStream(outContent));
	System.setErr(new PrintStream(errContent));
	System.setIn(in);
	// we wait for at most three minutes
	final int startTimeoutSeconds = 180;
	final long deadline = System.currentTimeMillis() + (startTimeoutSeconds * 1000);
	Runner runner = new Runner(
		args,
		flinkConfiguration,
		CliFrontend.getConfigurationDirectoryFromEnv(),
		type,
		expectedReturnValue,
		stdinPrintStream);
	runner.start();
	boolean expectedStringSeen = false;
	boolean testPassedFromLog4j = false;
	long shutdownTimeout = 30000L;
	do {
		sleep(1000);
		String outContentString = outContent.toString();
		String errContentString = errContent.toString();
		if (failOnPatterns != null) {
			for (String failOnString : failOnPatterns) {
				Pattern pattern = Pattern.compile(failOnString);
				if (pattern.matcher(outContentString).find() || pattern.matcher(errContentString).find()) {
					LOG.warn("Failing test. Output contained illegal string '" + failOnString + "'");
					resetStreamsAndSendOutput();
					// stopping runner.
					runner.sendStop();
					// wait for the thread to stop
					try {
						runner.join(shutdownTimeout);
					} catch (InterruptedException e) {
						LOG.warn("Interrupted while stopping runner", e);
					}
					Assert.fail("Output contained illegal string '" + failOnString + "'");
				}
			}
		}
		// the terminate string may also arrive via logging events rather than stdout/stderr
		for (String logMessage : logMessageSupplier.get()) {
			if (logMessage.contains(terminateAfterString)) {
				testPassedFromLog4j = true;
				LOG.info("Found expected output in logging event {}", logMessage);
			}
		}
		if (outContentString.contains(terminateAfterString) || errContentString.contains(terminateAfterString) || testPassedFromLog4j) {
			expectedStringSeen = true;
			LOG.info("Found expected output in redirected streams");
			// send "stop" command to command line interface
			LOG.info("RunWithArgs: request runner to stop");
			runner.sendStop();
			// wait for the thread to stop
			try {
				runner.join(shutdownTimeout);
			}
			catch (InterruptedException e) {
				LOG.warn("Interrupted while stopping runner", e);
			}
			LOG.warn("RunWithArgs runner stopped.");
		}
		else {
			// check if thread died
			if (!runner.isAlive()) {
				// leave loop: the runner died, so we can not expect new strings to show up.
				break;
			}
		}
	}
	while (runner.getRunnerError() == null && !expectedStringSeen && System.currentTimeMillis() < deadline);
	resetStreamsAndSendOutput();
	if (runner.getRunnerError() != null) {
		// this lets the test fail.
		throw new RuntimeException("Runner failed", runner.getRunnerError());
	}
	Assert.assertTrue("During the timeout period of " + startTimeoutSeconds + " seconds the " +
		"expected string \"" + terminateAfterString + "\" did not show up.", expectedStringSeen);
	LOG.info("Test was successful");
}
/**
 * Restores the JVM's original stdout/stderr/stdin and forwards the stdout/stderr
 * content captured in {@code outContent}/{@code errContent} to the logger.
 */
protected static void resetStreamsAndSendOutput() {
	System.setOut(ORIGINAL_STDOUT);
	System.setErr(ORIGINAL_STDERR);
	System.setIn(ORIGINAL_STDIN);
	LOG.info("Sending stdout content through logger: \n\n{}\n\n", outContent.toString());
	LOG.info("Sending stderr content through logger: \n\n{}\n\n", errContent.toString());
}
/**
 * Utility class to run yarn jobs.
 *
 * <p>A thread that drives either the YARN session CLI or the regular CLI frontend
 * with the given arguments, checks the CLI's return code against an expected value,
 * and records any throwable so the test thread can inspect it via
 * {@link #getRunnerError()}.
 */
protected static class Runner extends Thread {
	private final String[] args;
	private final org.apache.flink.configuration.Configuration configuration;
	private final String configurationDirectory;
	private final int expectedReturnValue;
	// writes into the pipe that the test installed as System.in; used by sendStop()
	private final PrintStream stdinPrintStream;
	private RunTypes type;
	private FlinkYarnSessionCli yCli;
	// set if the CLI threw or the return-code assertion failed; read by the test thread
	private Throwable runnerError;
	/**
	 * @param args command line arguments passed to the CLI
	 * @param configuration Flink configuration for the CLI
	 * @param configurationDirectory directory the CLI loads its configuration from
	 * @param type which CLI frontend to drive
	 * @param expectedReturnValue return code the CLI must produce, otherwise the run fails
	 * @param stdinPrintStream stream connected to the CLI's stdin (for the "stop" command)
	 */
	public Runner(
			String[] args,
			org.apache.flink.configuration.Configuration configuration,
			String configurationDirectory,
			RunTypes type,
			int expectedReturnValue,
			PrintStream stdinPrintStream) {
		this.args = args;
		this.configuration = Preconditions.checkNotNull(configuration);
		this.configurationDirectory = Preconditions.checkNotNull(configurationDirectory);
		this.type = type;
		this.expectedReturnValue = expectedReturnValue;
		this.stdinPrintStream = Preconditions.checkNotNull(stdinPrintStream);
	}
	@Override
	public void run() {
		try {
			int returnValue;
			switch (type) {
				case YARN_SESSION:
					yCli = new FlinkYarnSessionCli(
						configuration,
						configurationDirectory,
						"",
						"",
						true);
					returnValue = yCli.run(args);
					break;
				case CLI_FRONTEND:
					try {
						CliFrontend cli = new CliFrontend(
							configuration,
							CliFrontend.loadCustomCommandLines(configuration, configurationDirectory));
						returnValue = cli.parseParameters(args);
					} catch (Exception e) {
						throw new RuntimeException("Failed to execute the following args with CliFrontend: "
							+ Arrays.toString(args), e);
					}
					break;
				default:
					throw new RuntimeException("Unknown type " + type);
			}
			if (returnValue != this.expectedReturnValue) {
				Assert.fail("The YARN session returned with unexpected value=" + returnValue + " expected=" + expectedReturnValue);
			}
		} catch (Throwable t) {
			// catch everything (including the Assert.fail error) so the test thread can
			// surface it via getRunnerError()
			LOG.info("Runner stopped with exception", t);
			// save error.
			this.runnerError = t;
		}
	}
	/** Stops the Yarn session by writing "stop" to the CLI's stdin. */
	public void sendStop() {
		stdinPrintStream.println("stop");
	}
	/** Returns the throwable recorded during run(), or null if the run succeeded. */
	public Throwable getRunnerError() {
		return runnerError;
	}
}
// -------------------------- Tear down -------------------------- //
/**
 * Stops the MiniYARNCluster and MiniDFSCluster (if running), restores the
 * environment variables injected during startup, deletes temporary configuration
 * files, and — on Travis — copies the MiniYARNCluster log files into
 * {@code <flinkRoot>/target/} for upload.
 */
@AfterClass
public static void teardown() throws Exception {
	if (yarnCluster != null) {
		LOG.info("Stopping MiniYarn Cluster");
		yarnCluster.stop();
		yarnCluster = null;
	}
	if (miniDFSCluster != null) {
		LOG.info("Stopping MiniDFS Cluster");
		miniDFSCluster.shutdown();
		miniDFSCluster = null;
	}
	// Unset FLINK_CONF_DIR, as it might change the behavior of other tests
	Map<String, String> map = new HashMap<>(System.getenv());
	map.remove(ConfigConstants.ENV_FLINK_CONF_DIR);
	map.remove("YARN_CONF_DIR");
	map.remove("IN_TESTS");
	TestBaseUtils.setEnv(map);
	if (tempConfPathForSecureRun != null) {
		FileUtil.fullyDelete(tempConfPathForSecureRun);
		tempConfPathForSecureRun = null;
	}
	if (yarnSiteXML != null) {
		yarnSiteXML.delete();
	}
	if (hdfsSiteXML != null) {
		hdfsSiteXML.delete();
	}
	// When we are on travis, we copy the temp files of JUnit (containing the MiniYARNCluster log files)
	// to <flinkRoot>/target/flink-yarn-tests-*.
	// The files from there are picked up by the ./tools/travis_watchdog.sh script
	// to upload them to Amazon S3.
	if (isOnTravis()) {
		// BUGFIX: the original concatenated "../target" + cluster name without a path
		// separator, creating a sibling directory "../target<name>" instead of the
		// "<flinkRoot>/target/<name>" the comment above promises (cf. "target/" + ...
		// elsewhere in this class).
		File target = new File("../target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
		if (!target.mkdirs()) {
			LOG.warn("Error creating dirs to {}", target);
		}
		File src = tmp.getRoot();
		LOG.info("copying the final files from {} to {}", src.getAbsolutePath(), target.getAbsolutePath());
		try {
			FileUtils.copyDirectoryToDirectory(src, target);
		} catch (IOException e) {
			LOG.warn("Error copying the final files from {} to {}: msg: {}", src.getAbsolutePath(), target.getAbsolutePath(), e.getMessage(), e);
		}
	}
}
/**
 * Returns true when running on Travis CI, i.e. the {@code TRAVIS} environment
 * variable is set to {@code "true"}.
 */
public static boolean isOnTravis() {
	// "true".equals(...) is null-safe and reads the environment variable only once
	// (the original read it twice and needed an explicit null check).
	return "true".equals(System.getenv("TRAVIS"));
}
/**
 * Polls the application state until it becomes FINISHED. Fails the test immediately
 * if the application becomes FAILED or KILLED; if the timeout expires first, kills
 * the cluster and fails the test.
 *
 * @param applicationId the application to wait for
 * @param timeout maximum time to wait before killing the cluster
 * @param yarnClusterDescriptor descriptor used to kill the cluster on timeout
 * @param sleepIntervalInMS polling interval in milliseconds
 */
protected void waitApplicationFinishedElseKillIt(
		ApplicationId applicationId,
		Duration timeout,
		YarnClusterDescriptor yarnClusterDescriptor,
		int sleepIntervalInMS) throws Exception {
	Deadline deadline = Deadline.now().plus(timeout);
	YarnApplicationState state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();
	while (state != YarnApplicationState.FINISHED) {
		if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
			Assert.fail("Application became FAILED or KILLED while expecting FINISHED");
		}
		if (deadline.isOverdue()) {
			yarnClusterDescriptor.killCluster(applicationId);
			Assert.fail("Application didn't finish before timeout");
		}
		sleep(sleepIntervalInMS);
		// re-fetch the report each iteration to observe state transitions
		state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();
	}
}
/**
 * Wrapper around a {@link Scanner} that remembers the last N lines read, so a
 * caller that finds an interesting line can also retrieve the lines preceding it.
 */
private static class BufferingScanner {
	private final Scanner source;
	// maximum number of recently-read lines to keep
	private final int capacity;
	// oldest line first, newest last; never grows beyond capacity
	private final List<String> history;
	BufferingScanner(Scanner source, int capacity) {
		this.source = source;
		this.capacity = capacity;
		this.history = new ArrayList<>(capacity);
	}
	public boolean hasNextLine() {
		return source.hasNextLine();
	}
	public String nextLine() {
		// evict the oldest entry before reading, keeping the buffer at <= capacity
		if (history.size() == capacity) {
			history.remove(0);
		}
		final String current = source.nextLine();
		history.add(current);
		return current;
	}
	/** Returns a copy of the buffered lines, oldest first. */
	public List<String> getPreviousLines() {
		return new ArrayList<>(history);
	}
}
}
| [
"\"TRAVIS\"",
"\"TRAVIS\""
]
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | java | 1 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.