filename (string, lengths 4–198) | content (string, lengths 25–939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, lengths 2–3.9k) | lang (string, 3 distinct values) | constargcount (float64, 0–129, nullable) | variableargcount (float64, always 0, nullable) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
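For orientation, here is a minimal sketch of how one row could be represented in Python, assuming the columns above map one-to-one onto dictionary keys. The values are copied from the first record below (IOSFiles.java); the full file text in content is elided, and the per-field comments are inferences from the rows in this table rather than documented semantics.

# Minimal sketch of one record, assuming a one-to-one column-to-key mapping.
# Values are copied from the IOSFiles.java row below; "content" is elided.
record = {
    "filename": "backends/ios/src/io/nondev/nonfilesystem/IOSFiles.java",
    "content": "...",                # full source text of the file (elided here)
    "environment": ['"HOME"'],       # appears to be the getenv argument as written, quotes included
    "variablearg": [],               # appears to list arguments passed as variables (none here)
    "constarg": ["HOME"],            # appears to list arguments passed as string literals
    "variableargjson": "[]",         # JSON-encoded string form of variablearg
    "constargjson": '["HOME"]',      # JSON-encoded string form of constarg
    "lang": "java",
    "constargcount": 1.0,            # stored as float64 in the schema
    "variableargcount": 0.0,
    "sentence": "",
}
print(record["constarg"])            # -> ['HOME']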
backends/ios/src/io/nondev/nonfilesystem/IOSFiles.java
|
/*******************************************************************************
* Original source:
* Copyright 2011 See https://github.com/libgdx/libgdx/blob/master/AUTHORS
* under the Apache License included as LICENSE-LibGDX
*
* Modifications:
* Copyright 2015 Thomas Slusny
* Rewrote entire LibGDX filesystem to be non-LibGDX dependent. These
* modifications are licensed under below license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
******************************************************************************/
package io.nondev.nonfilesystem;
import org.robovm.apple.foundation.NSBundle;
public class IOSFiles implements Files {
// TODO: Use NSSearchPathForDirectoriesInDomains instead?
// $HOME should point to the app root dir.
static final String appDir = System.getenv("HOME");
static final String externalPath = appDir + "/Documents/";
static final String localPath = appDir + "/Library/local/";
static final String internalPath = NSBundle.getMainBundle().getBundlePath();
public IOSFiles () {
new FileHandle(this, externalPath).mkdirs();
new FileHandle(this, localPath).mkdirs();
}
@Override
public FileHandle getFileHandle (String fileName, FileType type) {
return new IOSFileHandle(this, fileName, type);
}
@Override
public FileHandle classpath (String path) {
return new IOSFileHandle(this, path, FileType.Classpath);
}
@Override
public FileHandle internal (String path) {
return new IOSFileHandle(this, path, FileType.Internal);
}
@Override
public FileHandle external (String path) {
return new IOSFileHandle(this, path, FileType.External);
}
@Override
public FileHandle absolute (String path) {
return new IOSFileHandle(this, path, FileType.Absolute);
}
@Override
public FileHandle local (String path) {
return new IOSFileHandle(this, path, FileType.Local);
}
@Override
public String getExternalStoragePath () {
return externalPath;
}
@Override
public boolean isExternalStorageAvailable () {
return true;
}
@Override
public String getLocalStoragePath () {
return localPath;
}
@Override
public boolean isLocalStorageAvailable () {
return true;
}
}
| environment: ["\"HOME\""] | variablearg: [] | constarg: ["HOME"] | variableargjson: [] | constargjson: ["HOME"] | lang: java | constargcount: 1 | variableargcount: 0 | sentence: |
integration-cli/docker_cli_pull_local_test.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/go-check/check"
"github.com/gotestyourself/gotestyourself/icmd"
"github.com/opencontainers/go-digest"
)
// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
// tags for the same image) are not also pulled down.
//
// Ref: docker/docker#8141
func testPullImageWithAliases(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh"} {
repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag))
}
// Tag and push the same image multiple times.
for _, repo := range repos {
dockerCmd(c, "tag", "busybox", repo)
dockerCmd(c, "push", repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Pull a single tag and verify it doesn't bring down all aliases.
dockerCmd(c, "pull", repos[0])
dockerCmd(c, "inspect", repos[0])
for _, repo := range repos[1:] {
_, _, err := dockerCmdWithError("inspect", repo)
c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo))
}
}
func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
testPullImageWithAliases(c)
}
func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) {
testPullImageWithAliases(c)
}
// testConcurrentPullWholeRepo pulls the same repo concurrently.
func testConcurrentPullWholeRepo(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh", "todays"} {
repo := fmt.Sprintf("%v:%v", repoName, tag)
buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(`
FROM busybox
ENTRYPOINT ["/bin/echo"]
ENV FOO foo
ENV BAR bar
CMD echo %s
`, repo)))
dockerCmd(c, "push", repo)
repos = append(repos, repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Run multiple re-pulls concurrently
results := make(chan error)
numPulls := 3
for i := 0; i != numPulls; i++ {
go func() {
result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName)
results <- result.Error
}()
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for i := 0; i != numPulls; i++ {
err := <-results
c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err))
}
// Ensure all tags were pulled successfully
for _, repo := range repos {
dockerCmd(c, "inspect", repo)
out, _ := dockerCmd(c, "run", "--rm", repo)
c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo)
}
}
func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
testConcurrentPullWholeRepo(c)
}
func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
testConcurrentPullWholeRepo(c)
}
// testConcurrentFailingPull tries a concurrent pull that doesn't succeed.
func testConcurrentFailingPull(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
// Run multiple pulls concurrently
results := make(chan error)
numPulls := 3
for i := 0; i != numPulls; i++ {
go func() {
result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf")
results <- result.Error
}()
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for i := 0; i != numPulls; i++ {
err := <-results
c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail"))
}
}
func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) {
testConcurrentFailingPull(c)
}
func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) {
testConcurrentFailingPull(c)
}
// testConcurrentPullMultipleTags pulls multiple tags from the same repo
// concurrently.
func testConcurrentPullMultipleTags(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh", "todays"} {
repo := fmt.Sprintf("%v:%v", repoName, tag)
buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(`
FROM busybox
ENTRYPOINT ["/bin/echo"]
ENV FOO foo
ENV BAR bar
CMD echo %s
`, repo)))
dockerCmd(c, "push", repo)
repos = append(repos, repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Re-pull individual tags, in parallel
results := make(chan error)
for _, repo := range repos {
go func(repo string) {
result := icmd.RunCommand(dockerBinary, "pull", repo)
results <- result.Error
}(repo)
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for range repos {
err := <-results
c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err))
}
// Ensure all tags were pulled successfully
for _, repo := range repos {
dockerCmd(c, "inspect", repo)
out, _ := dockerCmd(c, "run", "--rm", repo)
c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo)
}
}
func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
testConcurrentPullMultipleTags(c)
}
func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
testConcurrentPullMultipleTags(c)
}
// testPullIDStability verifies that pushing an image and pulling it back
// preserves the image ID.
func testPullIDStability(c *check.C) {
derivedImage := privateRegistryURL + "/dockercli/id-stability"
baseImage := "busybox"
buildImageSuccessfully(c, derivedImage, build.WithDockerfile(fmt.Sprintf(`
FROM %s
ENV derived true
ENV asdf true
RUN dd if=/dev/zero of=/file bs=1024 count=1024
CMD echo %s
`, baseImage, derivedImage)))
originalID := getIDByName(c, derivedImage)
dockerCmd(c, "push", derivedImage)
// Pull
out, _ := dockerCmd(c, "pull", derivedImage)
if strings.Contains(out, "Pull complete") {
c.Fatalf("repull redownloaded a layer: %s", out)
}
derivedIDAfterPull := getIDByName(c, derivedImage)
if derivedIDAfterPull != originalID {
c.Fatal("image's ID unexpectedly changed after a repush/repull")
}
// Make sure the image runs correctly
out, _ = dockerCmd(c, "run", "--rm", derivedImage)
if strings.TrimSpace(out) != derivedImage {
c.Fatalf("expected %s; got %s", derivedImage, out)
}
// Confirm that repushing and repulling does not change the computed ID
dockerCmd(c, "push", derivedImage)
dockerCmd(c, "rmi", derivedImage)
dockerCmd(c, "pull", derivedImage)
derivedIDAfterPull = getIDByName(c, derivedImage)
if derivedIDAfterPull != originalID {
c.Fatal("image's ID unexpectedly changed after a repush/repull")
}
// Make sure the image still runs
out, _ = dockerCmd(c, "run", "--rm", derivedImage)
if strings.TrimSpace(out) != derivedImage {
c.Fatalf("expected %s; got %s", derivedImage, out)
}
}
func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
testPullIDStability(c)
}
func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) {
testPullIDStability(c)
}
// #21213
func testPullNoLayers(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL)
buildImageSuccessfully(c, repoName, build.WithDockerfile(`
FROM scratch
ENV foo bar`))
dockerCmd(c, "push", repoName)
dockerCmd(c, "rmi", repoName)
dockerCmd(c, "pull", repoName)
}
func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) {
testPullNoLayers(c)
}
func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) {
testPullNoLayers(c)
}
func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) {
testRequires(c, NotArm)
pushDigest, err := setupImage(c)
c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
// Inject a manifest list into the registry
manifestList := &manifestlist.ManifestList{
Versioned: manifest.Versioned{
SchemaVersion: 2,
MediaType: manifestlist.MediaTypeManifestList,
},
Manifests: []manifestlist.ManifestDescriptor{
{
Descriptor: distribution.Descriptor{
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 3253,
MediaType: schema2.MediaTypeManifest,
},
Platform: manifestlist.PlatformSpec{
Architecture: "bogus_arch",
OS: "bogus_os",
},
},
{
Descriptor: distribution.Descriptor{
Digest: pushDigest,
Size: 3253,
MediaType: schema2.MediaTypeManifest,
},
Platform: manifestlist.PlatformSpec{
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
},
},
},
}
manifestListJSON, err := json.MarshalIndent(manifestList, "", " ")
c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list"))
manifestListDigest := digest.FromBytes(manifestListJSON)
hexDigest := manifestListDigest.Hex()
registryV2Path := s.reg.Path()
// Write manifest list to blob store
blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest)
err = os.MkdirAll(blobDir, 0755)
c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir"))
blobPath := filepath.Join(blobDir, "data")
err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list"))
// Add to revision store
revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest)
err = os.Mkdir(revisionDir, 0755)
c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir"))
revisionPath := filepath.Join(revisionDir, "link")
err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing revision link"))
// Update tag
tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link")
err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing tag link"))
// Verify that the image can be pulled through the manifest list.
out, _ := dockerCmd(c, "pull", repoName)
// The pull output includes "Digest: <digest>", so find that
matches := digestRegex.FindStringSubmatch(out)
c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out))
pullDigest := matches[1]
// Make sure the pushed and pull digests match
c.Assert(manifestListDigest.String(), checker.Equals, pullDigest)
// Was the image actually created?
dockerCmd(c, "inspect", repoName)
dockerCmd(c, "rmi", repoName)
}
// #23100
func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
dockerCmd(c, "--config", tmp, "logout", privateRegistryURL)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL)
dockerCmd(c, "--config", tmp, "pull", repoName)
// likewise push should work
repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL)
dockerCmd(c, "tag", repoName, repoName2)
dockerCmd(c, "--config", tmp, "push", repoName2)
// logout should also work with a scheme, because the scheme is stripped
dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL)
}
func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
dockerCmd(c, "--config", tmp, "pull", repoName)
}
// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest)
func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) {
testRequires(c, DaemonIsLinux)
repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repoTag1 := fmt.Sprintf("%v:latest", repo)
repoTag2 := fmt.Sprintf("%v:t1", repo)
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoTag1)
dockerCmd(c, "tag", "busybox", repoTag2)
dockerCmd(c, "push", repo)
dockerCmd(c, "rmi", repoTag1)
dockerCmd(c, "rmi", repoTag2)
out, _ := dockerCmd(c, "run", repo)
c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo))
// There should be only one line for repo, the one with repo:latest
outImageCmd, _ := dockerCmd(c, "images", repo)
splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n")
c.Assert(splitOutImageCmd, checker.HasLen, 2)
}
| environment: ["\"PATH\"", "\"PATH\""] | variablearg: [] | constarg: ["PATH"] | variableargjson: [] | constargjson: ["PATH"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
recommendations-grpc-service/pkg/tracer/main.go
|
package tracer
import (
"fmt"
"os"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/jaeger"
stdout "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)
func InitTracer() *sdktrace.TracerProvider {
exporter, err := stdout.New(stdout.WithPrettyPrint())
if err != nil {
fmt.Println("Failed to init tracer", err)
}
exp, err := jaeger.New(
jaeger.WithCollectorEndpoint(
jaeger.WithEndpoint(
os.Getenv("JAEGER_COLLECTOR_ENDPOINT"),
),
),
)
if err != nil {
fmt.Println("Failed to init jaeger", err)
}
tp := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exp),
sdktrace.WithSampler(sdktrace.AlwaysSample()),
sdktrace.WithBatcher(exporter),
sdktrace.WithResource(resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String("recommendations-grpc-service"),
)),
)
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
return tp
}
| environment: ["\"JAEGER_COLLECTOR_ENDPOINT\""] | variablearg: [] | constarg: ["JAEGER_COLLECTOR_ENDPOINT"] | variableargjson: [] | constargjson: ["JAEGER_COLLECTOR_ENDPOINT"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
utils/jwt_test.go
|
package utils_test
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
jwt "github.com/zoommix/fasthttp_template/utils"
)
// TestGenerateToken ...
func TestGenerateToken(t *testing.T) {
jwtWrapper := jwt.JwtWrapper{
SecretKey: "verysecretkey",
Issuer: "AuthService",
ExpirationHours: 24,
}
generatedToken, err := jwtWrapper.GenerateToken(121)
assert.NoError(t, err)
os.Setenv("testToken", generatedToken)
}
// TestValidateToken ...
func TestValidateToken(t *testing.T) {
encodedToken := os.Getenv("testToken")
jwtWrapper := jwt.JwtWrapper{
SecretKey: "verysecretkey",
Issuer: "AuthService",
}
claims, err := jwtWrapper.ValidateToken(encodedToken)
assert.NoError(t, err)
assert.Equal(t, 121, claims.UserID)
assert.Equal(t, "AuthService", claims.Issuer)
}
| environment: ["\"testToken\""] | variablearg: [] | constarg: ["testToken"] | variableargjson: [] | constargjson: ["testToken"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
run_integration_tests.go
|
// +build ignore
package main
import (
"bufio"
"bytes"
"encoding/base64"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
)
// ForbiddenImports are the packages from the stdlib that should not be used in
// our code.
var ForbiddenImports = map[string]bool{
"errors": true,
}
// Use a specific version of gofmt (the latest stable, usually) to guarantee
// deterministic formatting. This is used with the GoVersion.AtLeast()
// function (so that we don't forget to update it).
var GofmtVersion = ParseGoVersion("go1.11")
// GoVersion is the version of Go used to compile the project.
type GoVersion struct {
Major int
Minor int
Patch int
}
// ParseGoVersion parses the Go version s. If s cannot be parsed, the returned GoVersion is null.
func ParseGoVersion(s string) (v GoVersion) {
if !strings.HasPrefix(s, "go") {
return
}
s = s[2:]
data := strings.Split(s, ".")
if len(data) < 2 || len(data) > 3 {
// invalid version
return GoVersion{}
}
var err error
v.Major, err = strconv.Atoi(data[0])
if err != nil {
return GoVersion{}
}
// try to parse the minor version while removing an eventual suffix (like
// "rc2" or so)
for s := data[1]; s != ""; s = s[:len(s)-1] {
v.Minor, err = strconv.Atoi(s)
if err == nil {
break
}
}
if v.Minor == 0 {
// no minor version found
return GoVersion{}
}
if len(data) >= 3 {
v.Patch, err = strconv.Atoi(data[2])
if err != nil {
return GoVersion{}
}
}
return
}
// AtLeast returns true if v is at least as new as other. If v is empty, true is returned.
func (v GoVersion) AtLeast(other GoVersion) bool {
var empty GoVersion
// the empty version satisfies all versions
if v == empty {
return true
}
if v.Major < other.Major {
return false
}
if v.Minor < other.Minor {
return false
}
if v.Patch < other.Patch {
return false
}
return true
}
func (v GoVersion) String() string {
return fmt.Sprintf("Go %d.%d.%d", v.Major, v.Minor, v.Patch)
}
// CloudBackends contains a map of backend tests for cloud services to one
// of the essential environment variables which must be present in order to
// test it.
var CloudBackends = map[string]string{
"restic/backend/s3.TestBackendS3": "RESTIC_TEST_S3_REPOSITORY",
"restic/backend/swift.TestBackendSwift": "RESTIC_TEST_SWIFT",
"restic/backend/b2.TestBackendB2": "RESTIC_TEST_B2_REPOSITORY",
"restic/backend/gs.TestBackendGS": "RESTIC_TEST_GS_REPOSITORY",
"restic/backend/azure.TestBackendAzure": "RESTIC_TEST_AZURE_REPOSITORY",
}
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
func init() {
flag.Parse()
}
// CIEnvironment is implemented by environments where tests can be run.
type CIEnvironment interface {
Prepare() error
RunTests() error
Teardown() error
}
// TravisEnvironment is the environment in which Travis tests run.
type TravisEnvironment struct {
goxOSArch []string
env map[string]string
gcsCredentialsFile string
}
func (env *TravisEnvironment) getMinio() error {
tempfile, err := os.Create(filepath.Join(os.Getenv("GOPATH"), "bin", "minio"))
if err != nil {
return fmt.Errorf("create tempfile for minio download failed: %v", err)
}
url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio",
runtime.GOOS, runtime.GOARCH)
msg("downloading %v\n", url)
res, err := http.Get(url)
if err != nil {
return fmt.Errorf("error downloading minio server: %v", err)
}
_, err = io.Copy(tempfile, res.Body)
if err != nil {
return fmt.Errorf("error saving minio server to file: %v", err)
}
err = res.Body.Close()
if err != nil {
return fmt.Errorf("error closing HTTP download: %v", err)
}
err = tempfile.Close()
if err != nil {
msg("closing tempfile failed: %v\n", err)
return fmt.Errorf("error closing minio server file: %v", err)
}
err = os.Chmod(tempfile.Name(), 0755)
if err != nil {
return fmt.Errorf("chmod(minio-server) failed: %v", err)
}
msg("downloaded minio server to %v\n", tempfile.Name())
return nil
}
// Prepare installs dependencies and starts services in order to run the tests.
func (env *TravisEnvironment) Prepare() error {
env.env = make(map[string]string)
msg("preparing environment for Travis CI\n")
pkgs := []string{
"github.com/NebulousLabs/glyphcheck",
"github.com/restic/rest-server/cmd/rest-server",
"github.com/restic/calens",
"github.com/ncw/rclone",
}
for _, pkg := range pkgs {
err := run("go", "get", pkg)
if err != nil {
return err
}
}
if err := env.getMinio(); err != nil {
return err
}
if *runCrossCompile {
// only test cross compilation on linux with Travis
if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
return err
}
if runtime.GOOS == "linux" {
env.goxOSArch = []string{
"linux/386", "linux/amd64",
"windows/386", "windows/amd64",
"darwin/386", "darwin/amd64",
"freebsd/386", "freebsd/amd64",
"openbsd/386", "openbsd/amd64",
"netbsd/386", "netbsd/amd64",
"linux/arm", "freebsd/arm",
}
if os.Getenv("RESTIC_BUILD_SOLARIS") == "0" {
msg("Skipping Solaris build\n")
} else {
env.goxOSArch = append(env.goxOSArch, "solaris/amd64")
}
} else {
env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
}
msg("gox: OS/ARCH %v\n", env.goxOSArch)
}
// do not run cloud tests on darwin
if os.Getenv("RESTIC_TEST_CLOUD_BACKENDS") == "0" {
msg("skipping cloud backend tests\n")
for _, name := range CloudBackends {
err := os.Unsetenv(name)
if err != nil {
msg(" error unsetting %v: %v\n", name, err)
}
}
}
// extract credentials file for GCS tests
if b64data := os.Getenv("RESTIC_TEST_GS_APPLICATION_CREDENTIALS_B64"); b64data != "" {
buf, err := base64.StdEncoding.DecodeString(b64data)
if err != nil {
return err
}
f, err := ioutil.TempFile("", "gcs-credentials-")
if err != nil {
return err
}
msg("saving GCS credentials to %v\n", f.Name())
_, err = f.Write(buf)
if err != nil {
f.Close()
return err
}
env.gcsCredentialsFile = f.Name()
if err = f.Close(); err != nil {
return err
}
}
return nil
}
// Teardown stops backend services and cleans the environment again.
func (env *TravisEnvironment) Teardown() error {
msg("run travis teardown\n")
if env.gcsCredentialsFile != "" {
msg("remove gcs credentials file %v\n", env.gcsCredentialsFile)
return os.Remove(env.gcsCredentialsFile)
}
return nil
}
// RunTests starts the tests for Travis.
func (env *TravisEnvironment) RunTests() error {
env.env["GOPATH"] = os.Getenv("GOPATH")
if env.gcsCredentialsFile != "" {
env.env["GOOGLE_APPLICATION_CREDENTIALS"] = env.gcsCredentialsFile
}
// ensure that the following tests cannot be silently skipped on Travis
ensureTests := []string{
"restic/backend/rest.TestBackendREST",
"restic/backend/sftp.TestBackendSFTP",
"restic/backend/s3.TestBackendMinio",
"restic/backend/rclone.TestBackendRclone",
}
// make sure that cloud backends for which we have credentials are not
// silently skipped.
for pkg, env := range CloudBackends {
if _, ok := os.LookupEnv(env); ok {
ensureTests = append(ensureTests, pkg)
} else {
msg("credentials for %v are not available, skipping\n", pkg)
}
}
env.env["RESTIC_TEST_DISALLOW_SKIP"] = strings.Join(ensureTests, ",")
if *runCrossCompile {
// compile for all target architectures with tags
for _, tags := range []string{"release", "debug"} {
err := runWithEnv(env.env, "gox", "-verbose",
"-osarch", strings.Join(env.goxOSArch, " "),
"-tags", tags,
"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
"./cmd/restic")
if err != nil {
return err
}
}
}
args := []string{"go", "run", "build.go"}
v := ParseGoVersion(runtime.Version())
msg("Detected Go version %v\n", v)
if v.AtLeast(GoVersion{1, 11, 0}) {
args = []string{"go", "run", "-mod=vendor", "build.go"}
env.env["GOPROXY"] = "off"
delete(env.env, "GOPATH")
os.Unsetenv("GOPATH")
}
// run the build script
err := run(args[0], args[1:]...)
if err != nil {
return err
}
// run the tests and gather coverage information (for Go >= 1.10)
switch {
case v.AtLeast(GoVersion{1, 11, 0}):
err = runWithEnv(env.env, "go", "test", "-count", "1", "-mod=vendor", "-coverprofile", "all.cov", "./...")
case v.AtLeast(GoVersion{1, 10, 0}):
err = runWithEnv(env.env, "go", "test", "-count", "1", "-coverprofile", "all.cov", "./...")
default:
err = runWithEnv(env.env, "go", "test", "-count", "1", "./...")
}
if err != nil {
return err
}
// only run gofmt on a specific version of Go.
if v.AtLeast(GofmtVersion) {
if err = runGofmt(); err != nil {
return err
}
msg("run go mod vendor\n")
if err := runGoModVendor(); err != nil {
return err
}
msg("run go mod tidy\n")
if err := runGoModTidy(); err != nil {
return err
}
} else {
msg("Skipping gofmt and module vendor check for %v\n", v)
}
if err = runGlyphcheck(); err != nil {
return err
}
// check for forbidden imports
deps, err := env.findImports()
if err != nil {
return err
}
foundForbiddenImports := false
for name, imports := range deps {
for _, pkg := range imports {
if _, ok := ForbiddenImports[pkg]; ok {
fmt.Fprintf(os.Stderr, "========== package %v imports forbidden package %v\n", name, pkg)
foundForbiddenImports = true
}
}
}
if foundForbiddenImports {
return errors.New("CI: forbidden imports found")
}
// check that the entries in changelog/ are valid
if err := run("calens"); err != nil {
return errors.New("calens failed, files in changelog/ are not valid")
}
return nil
}
// AppveyorEnvironment is the environment on Windows.
type AppveyorEnvironment struct{}
// Prepare installs dependencies and starts services in order to run the tests.
func (env *AppveyorEnvironment) Prepare() error {
return nil
}
// RunTests starts the tests.
func (env *AppveyorEnvironment) RunTests() error {
e := map[string]string{
"GOPROXY": "off",
}
return runWithEnv(e, "go", "run", "-mod=vendor", "build.go", "-v", "-T")
}
// Teardown is a noop.
func (env *AppveyorEnvironment) Teardown() error {
return nil
}
// findGoFiles returns a list of go source code file names below dir.
func findGoFiles(dir string) (list []string, err error) {
err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
relpath, err := filepath.Rel(dir, name)
if err != nil {
return err
}
if relpath == "vendor" || relpath == "pkg" {
return filepath.SkipDir
}
if filepath.Ext(relpath) == ".go" {
list = append(list, relpath)
}
return err
})
return list, err
}
func msg(format string, args ...interface{}) {
fmt.Printf("CI: "+format, args...)
}
func updateEnv(env []string, override map[string]string) []string {
var newEnv []string
for _, s := range env {
d := strings.SplitN(s, "=", 2)
key := d[0]
if _, ok := override[key]; ok {
continue
}
newEnv = append(newEnv, s)
}
for k, v := range override {
newEnv = append(newEnv, k+"="+v)
}
return newEnv
}
func (env *TravisEnvironment) findImports() (map[string][]string, error) {
res := make(map[string][]string)
cmd := exec.Command("go", "list", "-f", `{{.ImportPath}} {{join .Imports " "}}`, "./internal/...", "./cmd/...")
cmd.Env = updateEnv(os.Environ(), env.env)
cmd.Stderr = os.Stderr
output, err := cmd.Output()
if err != nil {
return nil, err
}
sc := bufio.NewScanner(bytes.NewReader(output))
for sc.Scan() {
wordScanner := bufio.NewScanner(strings.NewReader(sc.Text()))
wordScanner.Split(bufio.ScanWords)
if !wordScanner.Scan() {
return nil, fmt.Errorf("package name not found in line: %s", output)
}
name := wordScanner.Text()
var deps []string
for wordScanner.Scan() {
deps = append(deps, wordScanner.Text())
}
res[name] = deps
}
return res, nil
}
func runGofmt() error {
dir, err := os.Getwd()
if err != nil {
return fmt.Errorf("Getwd(): %v", err)
}
files, err := findGoFiles(dir)
if err != nil {
return fmt.Errorf("error finding Go files: %v", err)
}
msg("runGofmt() with %d files\n", len(files))
args := append([]string{"-l"}, files...)
cmd := exec.Command("gofmt", args...)
cmd.Stderr = os.Stderr
buf, err := cmd.Output()
if err != nil {
return fmt.Errorf("error running gofmt: %v\noutput: %s", err, buf)
}
if len(buf) > 0 {
return fmt.Errorf("not formatted with `gofmt`:\n%s", buf)
}
return nil
}
func runGoModVendor() error {
cmd := exec.Command("go", "mod", "vendor")
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Env = updateEnv(os.Environ(), map[string]string{
"GO111MODULE": "on",
})
err := cmd.Run()
if err != nil {
return fmt.Errorf("error running 'go mod vendor': %v", err)
}
// check that "git diff" does not return any output
cmd = exec.Command("git", "diff", "vendor")
cmd.Stderr = os.Stderr
buf, err := cmd.Output()
if err != nil {
return fmt.Errorf("error running 'git diff vendor': %v\noutput: %s", err, buf)
}
if len(buf) > 0 {
return fmt.Errorf("vendor/ directory was modified:\n%s", buf)
}
return nil
}
// run "go mod tidy" so that go.sum and go.mod are updated to reflect all
// dependencies for all OS/Arch combinations, see
// https://github.com/golang/go/wiki/Modules#why-does-go-mod-tidy-put-so-many-indirect-dependencies-in-my-gomod
func runGoModTidy() error {
cmd := exec.Command("go", "mod", "tidy")
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Env = updateEnv(os.Environ(), map[string]string{
"GO111MODULE": "on",
})
err := cmd.Run()
if err != nil {
return fmt.Errorf("error running 'go mod vendor': %v", err)
}
// check that "git diff" does not return any output
cmd = exec.Command("git", "diff", "go.sum", "go.mod")
cmd.Stderr = os.Stderr
buf, err := cmd.Output()
if err != nil {
return fmt.Errorf("error running 'git diff vendor': %v\noutput: %s", err, buf)
}
if len(buf) > 0 {
return fmt.Errorf("vendor/ directory was modified:\n%s", buf)
}
return nil
}
func runGlyphcheck() error {
cmd := exec.Command("glyphcheck", "./cmd/...", "./internal/...")
cmd.Stderr = os.Stderr
buf, err := cmd.Output()
if err != nil {
return fmt.Errorf("error running glyphcheck: %v\noutput: %s", err, buf)
}
return nil
}
func run(command string, args ...string) error {
msg("run %v %v\n", command, strings.Join(args, " "))
return runWithEnv(nil, command, args...)
}
// runWithEnv calls a command with the current environment, except the entries
// of the env map are set additionally.
func runWithEnv(env map[string]string, command string, args ...string) error {
msg("runWithEnv %v %v\n", command, strings.Join(args, " "))
cmd := exec.Command(command, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if env != nil {
cmd.Env = updateEnv(os.Environ(), env)
}
err := cmd.Run()
if err != nil {
return fmt.Errorf("error running %v %v: %v",
command, strings.Join(args, " "), err)
}
return nil
}
func isTravis() bool {
return os.Getenv("TRAVIS_BUILD_DIR") != ""
}
func isAppveyor() bool {
return runtime.GOOS == "windows"
}
func main() {
var env CIEnvironment
switch {
case isTravis():
env = &TravisEnvironment{}
case isAppveyor():
env = &AppveyorEnvironment{}
default:
fmt.Fprintln(os.Stderr, "unknown CI environment")
os.Exit(1)
}
err := env.Prepare()
if err != nil {
fmt.Fprintf(os.Stderr, "error preparing: %v\n", err)
os.Exit(1)
}
err = env.RunTests()
if err != nil {
fmt.Fprintf(os.Stderr, "error running tests: %v\n", err)
os.Exit(2)
}
err = env.Teardown()
if err != nil {
fmt.Fprintf(os.Stderr, "error during teardown: %v\n", err)
os.Exit(3)
}
}
| environment: ["\"GOPATH\"", "\"RESTIC_BUILD_SOLARIS\"", "\"RESTIC_TEST_CLOUD_BACKENDS\"", "\"RESTIC_TEST_GS_APPLICATION_CREDENTIALS_B64\"", "\"GOPATH\"", "\"TRAVIS_BUILD_DIR\""] | variablearg: [] | constarg: ["TRAVIS_BUILD_DIR", "GOPATH", "RESTIC_TEST_CLOUD_BACKENDS", "RESTIC_BUILD_SOLARIS", "RESTIC_TEST_GS_APPLICATION_CREDENTIALS_B64"] | variableargjson: [] | constargjson: ["TRAVIS_BUILD_DIR", "GOPATH", "RESTIC_TEST_CLOUD_BACKENDS", "RESTIC_BUILD_SOLARIS", "RESTIC_TEST_GS_APPLICATION_CREDENTIALS_B64"] | lang: go | constargcount: 5 | variableargcount: 0 | sentence: |
PaddleNLP/Research/ACL2019-JEMT/train.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ast
import copy
import logging
import multiprocessing
import os
import six
import sys
import time
import numpy as np
import paddle.fluid as fluid
import reader
from config import *
from desc import *
from model import transformer, position_encoding_init
def parse_args():
parser = argparse.ArgumentParser("Training for Transformer.")
parser.add_argument(
"--src_vocab_fpath",
type=str,
required=True,
help="The path of vocabulary file of source language.")
parser.add_argument(
"--trg_vocab_fpath",
type=str,
required=True,
help="The path of vocabulary file of target language.")
parser.add_argument(
"--phoneme_vocab_fpath",
type=str,
required=True,
help="The path of vocabulary file of phonemes.")
parser.add_argument(
"--lexicon_fpath",
type=str,
required=True,
help="The path of lexicon of source language.")
parser.add_argument(
"--train_file_pattern",
type=str,
required=True,
help="The pattern to match training data files.")
parser.add_argument(
"--val_file_pattern",
type=str,
help="The pattern to match validation data files.")
parser.add_argument(
"--use_token_batch",
type=ast.literal_eval,
default=True,
help="The flag indicating whether to "
"produce batch data according to token number.")
parser.add_argument(
"--batch_size",
type=int,
default=4096,
help="The number of sequences contained in a mini-batch, or the maximum "
"number of tokens (include paddings) contained in a mini-batch. Note "
"that this represents the number on single device and the actual batch "
"size for multi-devices will multiply the device number.")
parser.add_argument(
"--pool_size",
type=int,
default=200000,
help="The buffer size to pool data.")
parser.add_argument(
"--sort_type",
default="pool",
choices=("global", "pool", "none"),
help="The grain to sort by length: global for all instances; pool for "
"instances in pool; none for no sort.")
parser.add_argument(
"--shuffle",
type=ast.literal_eval,
default=True,
help="The flag indicating whether to shuffle instances in each pass.")
parser.add_argument(
"--shuffle_batch",
type=ast.literal_eval,
default=True,
help="The flag indicating whether to shuffle the data batches.")
parser.add_argument(
"--special_token",
type=str,
default=["<s>", "<e>", "<unk>"],
nargs=3,
help="The <bos>, <eos> and <unk> tokens in the dictionary.")
parser.add_argument(
"--token_delimiter",
type=lambda x: str(x.encode().decode("unicode-escape")),
default=" ",
help="The delimiter used to split tokens in source or target sentences. "
"For EN-DE BPE data we provided, use spaces as token delimiter. ")
parser.add_argument(
'opts',
help='See config.py for all options',
default=None,
nargs=argparse.REMAINDER)
parser.add_argument(
'--local',
type=ast.literal_eval,
default=True,
help='Whether to run as local mode.')
parser.add_argument(
'--device',
type=str,
default='GPU',
choices=['CPU', 'GPU'],
help="The device type.")
parser.add_argument(
'--update_method',
choices=("pserver", "nccl2"),
default="pserver",
help='Update method.')
parser.add_argument(
'--sync', type=ast.literal_eval, default=True, help="sync mode.")
parser.add_argument(
"--enable_ce",
type=ast.literal_eval,
default=False,
help="The flag indicating whether to run the task "
"for continuous evaluation.")
parser.add_argument(
"--use_py_reader",
type=ast.literal_eval,
default=True,
help="The flag indicating whether to use py_reader.")
parser.add_argument(
"--fetch_steps",
type=int,
default=100,
help="The frequency to fetch and print output.")
args = parser.parse_args()
# Append args related to dict
src_dict = reader.DataReader.load_dict(args.src_vocab_fpath)
trg_dict = reader.DataReader.load_dict(args.trg_vocab_fpath)
phone_dict = reader.DataReader.load_dict(args.phoneme_vocab_fpath)
dict_args = [
"src_vocab_size", str(len(src_dict)), "trg_vocab_size",
str(len(trg_dict)), "phone_vocab_size", str(len(phone_dict)), "bos_idx",
str(src_dict[args.special_token[0]]), "eos_idx",
str(src_dict[args.special_token[1]]), "unk_idx",
str(src_dict[args.special_token[2]])
]
merge_cfg_from_list(args.opts + dict_args,
[TrainTaskConfig, ModelHyperParams])
return args
def append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,
current_endpoint):
assert (trainer_id >= 0 and len(worker_endpoints) > 1 and
current_endpoint in worker_endpoints)
eps = copy.deepcopy(worker_endpoints)
eps.remove(current_endpoint)
nccl_id_var = startup_prog.global_block().create_var(
name="NCCLID", persistable=True, type=fluid.core.VarDesc.VarType.RAW)
startup_prog.global_block().append_op(
type="gen_nccl_id",
inputs={},
outputs={"NCCLID": nccl_id_var},
attrs={
"endpoint": current_endpoint,
"endpoint_list": eps,
"trainer_id": trainer_id
})
return nccl_id_var
def pad_phoneme_data(phoneme_seqs, pad_idx, max_seq_len):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and attention bias.
"""
ph_seq_lens = []
for ps in phoneme_seqs:
cur_seq_lens = [len(x) for x in ps]
ph_seq_lens.append(max(cur_seq_lens))
max_ph_seq_len = max(ph_seq_lens)
batch_size = len(phoneme_seqs)
phoneme_data = pad_idx * np.ones(
(batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)
phoneme_mask = np.zeros(
(batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)
for i in range(batch_size):
cur_ph_seq = phoneme_seqs[i]
for j, cur_word_phs in enumerate(cur_ph_seq):
word_phs_len = len(cur_word_phs)
phoneme_data[i, j, :word_phs_len] = cur_word_phs
phoneme_mask[i, j, :word_phs_len] = 1
phoneme_data = np.reshape(phoneme_data, [batch_size, max_seq_len, -1, 1])
return phoneme_data, phoneme_mask, max_ph_seq_len
def pad_batch_data(insts,
pad_idx,
n_head,
is_target=False,
is_label=False,
return_attn_bias=True,
return_max_len=True,
return_num_token=False):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and attention bias.
"""
return_list = []
max_len = max(len(inst) for inst in insts)
# Any token included in dict can be used to pad, since the paddings' loss
# will be masked out by weights and make no effect on parameter gradients.
inst_data = np.array(
[inst + [pad_idx] * (max_len - len(inst)) for inst in insts])
return_list += [inst_data.astype("int64").reshape([-1, 1])]
if is_label: # label weight
inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst))
for inst in insts])
return_list += [inst_weight.astype("float32").reshape([-1, 1])]
else: # position data
inst_pos = np.array([
list(range(0, len(inst))) + [0] * (max_len - len(inst))
for inst in insts
])
return_list += [inst_pos.astype("int64").reshape([-1, 1])]
if return_attn_bias:
if is_target:
# This is used to avoid attention on paddings and subsequent
# words.
slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len))
slf_attn_bias_data = np.triu(slf_attn_bias_data,
1).reshape([-1, 1, max_len, max_len])
slf_attn_bias_data = np.tile(slf_attn_bias_data,
[1, n_head, 1, 1]) * [-1e9]
else:
# This is used to avoid attention on paddings.
slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *
(max_len - len(inst))
for inst in insts])
slf_attn_bias_data = np.tile(
slf_attn_bias_data.reshape([-1, 1, 1, max_len]),
[1, n_head, max_len, 1])
return_list += [slf_attn_bias_data.astype("float32")]
if return_max_len:
return_list += [max_len]
if return_num_token:
num_token = 0
for inst in insts:
num_token += len(inst)
return_list += [num_token]
return return_list if len(return_list) > 1 else return_list[0]
def prepare_batch_input(insts, data_input_names, src_pad_idx, phone_pad_idx,
trg_pad_idx, n_head, d_model):
"""
Put all padded data needed by training into a dict.
"""
src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(
[inst[0] for inst in insts], src_pad_idx, n_head, is_target=False)
src_word = src_word.reshape(-1, src_max_len, 1)
src_pos = src_pos.reshape(-1, src_max_len, 1)
src_phone, src_phone_mask, max_phone_len = pad_phoneme_data(
[inst[1] for inst in insts], phone_pad_idx, src_max_len)
trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(
[inst[2] for inst in insts], trg_pad_idx, n_head, is_target=True)
trg_word = trg_word.reshape(-1, trg_max_len, 1)
trg_pos = trg_pos.reshape(-1, trg_max_len, 1)
trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],
[1, 1, trg_max_len, 1]).astype("float32")
lbl_word, lbl_weight, num_token = pad_batch_data(
[inst[3] for inst in insts],
trg_pad_idx,
n_head,
is_target=False,
is_label=True,
return_attn_bias=False,
return_max_len=False,
return_num_token=True)
data_input_dict = dict(
zip(data_input_names, [
src_word, src_pos, src_slf_attn_bias, src_phone, src_phone_mask,
trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, lbl_word,
lbl_weight
]))
return data_input_dict, np.asarray([num_token], dtype="float32")
def prepare_data_generator(args,
is_test,
count,
pyreader,
py_reader_provider_wrapper,
place=None):
"""
Data generator wrapper for DataReader. If use py_reader, set the data
provider for py_reader
"""
data_reader = reader.DataReader(
phoneme_vocab_fpath=args.phoneme_vocab_fpath,
lexicon_fpath=args.lexicon_fpath,
fpattern=args.val_file_pattern if is_test else args.train_file_pattern,
src_vocab_fpath=args.src_vocab_fpath,
trg_vocab_fpath=args.trg_vocab_fpath,
token_delimiter=args.token_delimiter,
use_token_batch=args.use_token_batch,
batch_size=args.batch_size * (1 if args.use_token_batch else count),
pool_size=args.pool_size,
sort_type=args.sort_type,
shuffle=args.shuffle,
shuffle_batch=args.shuffle_batch,
start_mark=args.special_token[0],
end_mark=args.special_token[1],
unk_mark=args.special_token[2],
# count start and end tokens out
max_length=ModelHyperParams.max_length - 2,
clip_last_batch=False).batch_generator
def stack(data_reader, count, clip_last=True):
def __impl__():
res = []
for item in data_reader():
res.append(item)
if len(res) == count:
yield res
res = []
if len(res) == count:
yield res
elif not clip_last:
data = []
for item in res:
data += item
if len(data) > count:
inst_num_per_part = len(data) // count
yield [
data[inst_num_per_part * i:inst_num_per_part * (i + 1)]
for i in range(count)
]
return __impl__
def split(data_reader, count):
def __impl__():
for item in data_reader():
inst_num_per_part = len(item) // count
for i in range(count):
yield item[inst_num_per_part * i:inst_num_per_part * (i + 1
)]
return __impl__
if not args.use_token_batch:
# to make data on each device have similar token number
data_reader = split(data_reader, count)
if args.use_py_reader:
pyreader.decorate_tensor_provider(
py_reader_provider_wrapper(data_reader, place))
data_reader = None
else: # Data generator for multi-devices
data_reader = stack(data_reader, count)
return data_reader
def prepare_feed_dict_list(data_generator, init_flag, count):
"""
Prepare the list of feed dict for multi-devices.
"""
feed_dict_list = []
if data_generator is not None: # use_py_reader == False
data_input_names = encoder_data_input_fields + \
decoder_data_input_fields[:-1] + label_data_input_fields
data = next(data_generator)
for idx, data_buffer in enumerate(data):
data_input_dict, num_token = prepare_batch_input(
data_buffer, data_input_names, ModelHyperParams.eos_idx,
ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,
ModelHyperParams.n_head, ModelHyperParams.d_model)
feed_dict_list.append(data_input_dict)
if init_flag:
for idx in range(count):
pos_enc_tables = dict()
for pos_enc_param_name in pos_enc_param_names:
pos_enc_tables[pos_enc_param_name] = position_encoding_init(
ModelHyperParams.max_length + 1, ModelHyperParams.d_model)
if len(feed_dict_list) <= idx:
feed_dict_list.append(pos_enc_tables)
else:
feed_dict_list[idx] = dict(
list(pos_enc_tables.items()) + list(feed_dict_list[idx]
.items()))
return feed_dict_list if len(feed_dict_list) == count else None
def py_reader_provider_wrapper(data_reader, place):
"""
Data provider needed by fluid.layers.py_reader.
"""
def py_reader_provider():
data_input_names = encoder_data_input_fields + \
decoder_data_input_fields[:-1] + label_data_input_fields
for batch_id, data in enumerate(data_reader()):
data_input_dict, num_token = prepare_batch_input(
data, data_input_names, ModelHyperParams.eos_idx,
ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,
ModelHyperParams.n_head, ModelHyperParams.d_model)
yield [data_input_dict[item] for item in data_input_names]
return py_reader_provider
def test_context(exe, train_exe, dev_count):
# Context to do validation.
test_prog = fluid.Program()
startup_prog = fluid.Program()
if args.enable_ce:
test_prog.random_seed = 1000
startup_prog.random_seed = 1000
with fluid.program_guard(test_prog, startup_prog):
with fluid.unique_name.guard():
sum_cost, avg_cost, predict, token_num, pyreader = transformer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
ModelHyperParams.max_length + 1,
ModelHyperParams.n_layer,
ModelHyperParams.n_head,
ModelHyperParams.d_key,
ModelHyperParams.d_value,
ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid,
ModelHyperParams.prepostprocess_dropout,
ModelHyperParams.attention_dropout,
ModelHyperParams.relu_dropout,
ModelHyperParams.preprocess_cmd,
ModelHyperParams.postprocess_cmd,
ModelHyperParams.weight_sharing,
TrainTaskConfig.label_smooth_eps,
use_py_reader=args.use_py_reader,
beta=ModelHyperParams.beta,
is_test=True)
test_prog = test_prog.clone(for_test=True)
test_data = prepare_data_generator(
args,
is_test=True,
count=dev_count,
pyreader=pyreader,
py_reader_provider_wrapper=py_reader_provider_wrapper)
exe.run(startup_prog) # to init pyreader for testing
if TrainTaskConfig.ckpt_path:
fluid.io.load_persistables(
exe, TrainTaskConfig.ckpt_path, main_program=test_prog)
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.use_experimental_executor = True
build_strategy = fluid.BuildStrategy()
test_exe = fluid.ParallelExecutor(
use_cuda=TrainTaskConfig.use_gpu,
main_program=test_prog,
build_strategy=build_strategy,
exec_strategy=exec_strategy,
share_vars_from=train_exe)
def test(exe=test_exe, pyreader=pyreader):
test_total_cost = 0
test_total_token = 0
if args.use_py_reader:
pyreader.start()
data_generator = None
else:
data_generator = test_data()
while True:
try:
feed_dict_list = prepare_feed_dict_list(data_generator, False,
dev_count)
outs = test_exe.run(fetch_list=[sum_cost.name, token_num.name],
feed=feed_dict_list)
except (StopIteration, fluid.core.EOFException):
# The current pass is over.
if args.use_py_reader:
pyreader.reset()
break
sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1])
test_total_cost += sum_cost_val.sum()
test_total_token += token_num_val.sum()
test_avg_cost = test_total_cost / test_total_token
test_ppl = np.exp([min(test_avg_cost, 100)])
return test_avg_cost, test_ppl
return test
def train_loop(exe,
train_prog,
startup_prog,
dev_count,
sum_cost,
avg_cost,
token_num,
predict,
pyreader,
nccl2_num_trainers=1,
nccl2_trainer_id=0):
# Initialize the parameters.
if TrainTaskConfig.ckpt_path:
exe.run(startup_prog) # to init pyreader for training
logging.info("load checkpoint from {}".format(
TrainTaskConfig.ckpt_path))
fluid.io.load_persistables(
exe, TrainTaskConfig.ckpt_path, main_program=train_prog)
else:
logging.info("init fluid.framework.default_startup_program")
exe.run(startup_prog)
logging.info("begin reader")
train_data = prepare_data_generator(
args,
is_test=False,
count=dev_count,
pyreader=pyreader,
py_reader_provider_wrapper=py_reader_provider_wrapper)
# For faster executor
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.use_experimental_executor = True
exec_strategy.num_iteration_per_drop_scope = int(args.fetch_steps)
build_strategy = fluid.BuildStrategy()
# Since the token number differs among devices, customize gradient scale to
# use token average cost among multi-devices. and the gradient scale is
# `1 / token_number` for average cost.
# build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized
logging.info("begin executor")
train_exe = fluid.ParallelExecutor(
use_cuda=TrainTaskConfig.use_gpu,
loss_name=avg_cost.name,
main_program=train_prog,
build_strategy=build_strategy,
exec_strategy=exec_strategy,
num_trainers=nccl2_num_trainers,
trainer_id=nccl2_trainer_id)
if args.val_file_pattern is not None:
test = test_context(exe, train_exe, dev_count)
# the best cross-entropy value with label smoothing
loss_normalizer = -((1. - TrainTaskConfig.label_smooth_eps) * np.log(
(1. - TrainTaskConfig.label_smooth_eps
)) + TrainTaskConfig.label_smooth_eps *
np.log(TrainTaskConfig.label_smooth_eps / (
ModelHyperParams.trg_vocab_size - 1) + 1e-20))
step_idx = 0
init_flag = True
logging.info("begin train")
for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
pass_start_time = time.time()
if args.use_py_reader:
pyreader.start()
data_generator = None
else:
data_generator = train_data()
batch_id = 0
while True:
try:
feed_dict_list = prepare_feed_dict_list(data_generator,
init_flag, dev_count)
outs = train_exe.run(
fetch_list=[sum_cost.name, token_num.name]
if step_idx % args.fetch_steps == 0 else [],
feed=feed_dict_list)
if step_idx % args.fetch_steps == 0:
sum_cost_val, token_num_val = np.array(outs[0]), np.array(
outs[1])
# sum the cost from multi-devices
total_sum_cost = sum_cost_val.sum()
total_token_num = token_num_val.sum()
total_avg_cost = total_sum_cost / total_token_num
if step_idx == 0:
logging.info(
"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, "
"normalized loss: %f, ppl: %f" %
(step_idx, pass_id, batch_id, total_avg_cost,
total_avg_cost - loss_normalizer,
np.exp([min(total_avg_cost, 100)])))
avg_batch_time = time.time()
else:
logging.info(
"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, "
"normalized loss: %f, ppl: %f, speed: %.2f step/s" %
(step_idx, pass_id, batch_id, total_avg_cost,
total_avg_cost - loss_normalizer, np.exp(
[min(total_avg_cost, 100)]),
args.fetch_steps / (time.time() - avg_batch_time)))
avg_batch_time = time.time()
if step_idx % TrainTaskConfig.save_freq == 0 and step_idx > 0:
fluid.io.save_persistables(
exe,
os.path.join(TrainTaskConfig.ckpt_dir,
"latest.checkpoint"), train_prog)
fluid.io.save_params(
exe,
os.path.join(TrainTaskConfig.model_dir,
"iter_" + str(step_idx) + ".infer.model"),
train_prog)
init_flag = False
batch_id += 1
step_idx += 1
except (StopIteration, fluid.core.EOFException):
# The current pass is over.
if args.use_py_reader:
pyreader.reset()
break
time_consumed = time.time() - pass_start_time
# Validate and save the persistable.
if args.val_file_pattern is not None:
val_avg_cost, val_ppl = test()
logging.info(
"epoch: %d, val avg loss: %f, val normalized loss: %f, val ppl: %f,"
" consumed %fs" % (pass_id, val_avg_cost,
val_avg_cost - loss_normalizer, val_ppl,
time_consumed))
else:
logging.info("epoch: %d, consumed %fs" % (pass_id, time_consumed))
if not args.enable_ce:
fluid.io.save_persistables(
exe,
os.path.join(TrainTaskConfig.ckpt_dir,
"pass_" + str(pass_id) + ".checkpoint"),
train_prog)
if args.enable_ce: # For CE
print("kpis\ttrain_cost_card%d\t%f" % (dev_count, total_avg_cost))
if args.val_file_pattern is not None:
print("kpis\ttest_cost_card%d\t%f" % (dev_count, val_avg_cost))
print("kpis\ttrain_duration_card%d\t%f" % (dev_count, time_consumed))
def train(args):
# priority: ENV > args > config
is_local = os.getenv("PADDLE_IS_LOCAL", "1")
if is_local == '0':
args.local = False
logging.info(args)
if args.device == 'CPU':
TrainTaskConfig.use_gpu = False
training_role = os.getenv("TRAINING_ROLE", "TRAINER")
if training_role == "PSERVER" or (not TrainTaskConfig.use_gpu):
place = fluid.CPUPlace()
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
exe = fluid.Executor(place)
train_prog = fluid.Program()
startup_prog = fluid.Program()
if args.enable_ce:
train_prog.random_seed = 1000
startup_prog.random_seed = 1000
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
sum_cost, avg_cost, predict, token_num, pyreader = transformer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
ModelHyperParams.phone_vocab_size,
ModelHyperParams.max_length + 1,
ModelHyperParams.n_layer,
ModelHyperParams.n_head,
ModelHyperParams.d_key,
ModelHyperParams.d_value,
ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid,
ModelHyperParams.prepostprocess_dropout,
ModelHyperParams.attention_dropout,
ModelHyperParams.relu_dropout,
ModelHyperParams.preprocess_cmd,
ModelHyperParams.postprocess_cmd,
ModelHyperParams.weight_sharing,
TrainTaskConfig.label_smooth_eps,
ModelHyperParams.beta,
ModelHyperParams.bos_idx,
use_py_reader=args.use_py_reader,
is_test=False)
optimizer = None
if args.sync:
lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(
ModelHyperParams.d_model, TrainTaskConfig.warmup_steps)
logging.info("before adam")
with fluid.default_main_program()._lr_schedule_guard():
learning_rate = lr_decay * TrainTaskConfig.learning_rate
optimizer = fluid.optimizer.Adam(
learning_rate=learning_rate,
beta1=TrainTaskConfig.beta1,
beta2=TrainTaskConfig.beta2,
epsilon=TrainTaskConfig.eps)
else:
optimizer = fluid.optimizer.SGD(0.003)
optimizer.minimize(avg_cost)
if args.local:
logging.info("local start_up:")
train_loop(exe, train_prog, startup_prog, dev_count, sum_cost, avg_cost,
token_num, predict, pyreader)
else:
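# Distributed training: NCCL2 collective mode or transpiled parameter-server mode, selected by args.update_method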
if args.update_method == "nccl2":
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
port = os.getenv("PADDLE_PORT")
worker_ips = os.getenv("PADDLE_TRAINERS")
worker_endpoints = []
for ip in worker_ips.split(","):
worker_endpoints.append(':'.join([ip, port]))
trainers_num = len(worker_endpoints)
current_endpoint = os.getenv("POD_IP") + ":" + port
if trainer_id == 0:
logging.info("train_id == 0, sleep 60s")
time.sleep(60)
logging.info("trainers_num:{}".format(trainers_num))
logging.info("worker_endpoints:{}".format(worker_endpoints))
logging.info("current_endpoint:{}".format(current_endpoint))
append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,
current_endpoint)
train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,
avg_cost, token_num, predict, pyreader, trainers_num,
trainer_id)
return
port = os.getenv("PADDLE_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVERS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "0"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
logging.info("pserver_endpoints:{}".format(pserver_endpoints))
logging.info("current_endpoint:{}".format(current_endpoint))
logging.info("trainer_id:{}".format(trainer_id))
logging.info("pserver_ips:{}".format(pserver_ips))
logging.info("port:{}".format(port))
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id,
pservers=pserver_endpoints,
trainers=trainers,
program=train_prog,
startup_program=startup_prog)
if training_role == "PSERVER":
logging.info("distributed: pserver started")
current_endpoint = os.getenv("POD_IP") + ":" + os.getenv(
"PADDLE_PORT")
if not current_endpoint:
logging.critical("need env SERVER_ENDPOINT")
exit(1)
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
logging.info("distributed: trainer started")
trainer_prog = t.get_trainer_program()
train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,
avg_cost, token_num, predict, pyreader)
else:
logging.critical(
"environment var TRAINER_ROLE should be TRAINER os PSERVER")
exit(1)
if __name__ == "__main__":
LOG_FORMAT = "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s"
logging.basicConfig(
stream=sys.stdout, level=logging.DEBUG, format=LOG_FORMAT)
logging.getLogger().setLevel(logging.INFO)
args = parse_args()
train(args)
|
[] |
[] |
[
"PADDLE_TRAINER_ID",
"TRAINING_ROLE",
"CPU_NUM",
"POD_IP",
"PADDLE_TRAINERS",
"PADDLE_PSERVERS",
"PADDLE_IS_LOCAL",
"PADDLE_PORT",
"PADDLE_TRAINERS_NUM"
] |
[]
|
["PADDLE_TRAINER_ID", "TRAINING_ROLE", "CPU_NUM", "POD_IP", "PADDLE_TRAINERS", "PADDLE_PSERVERS", "PADDLE_IS_LOCAL", "PADDLE_PORT", "PADDLE_TRAINERS_NUM"]
|
python
| 9 | 0 | |
shadowsocks/asyncdns.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
if __name__ == '__main__':
import sys
import inspect
file_path = os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.join(file_path, '../'))
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d_-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def detect_ipv6_support():
if 'has_ipv6' in dir(socket):
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
print('IPv6 supported')
return True
except:
pass
print('IPv6 not supported')
return False
IPV6_CONNECTION_SUPPORT = detect_ipv6_support()
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
def build_request(address, qtype):
request_id = os.urandom(2)
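# header flags: first byte 0x01 sets RD (recursion desired); QDCOUNT=1, ANCOUNT=NSCOUNT=ARCOUNT=0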
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
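# header[1] carries QR/Opcode/AA/TC/RD and header[2] carries RA/Z/RCODE (see the rfc1035 diagram above)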
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1] == b'.':
hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
self._servers = None
self._parse_resolv()
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('dns.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
parts = line.split(b' ', 1)
if len(parts) >= 2:
server = parts[0]
port = int(parts[1])
else:
server = parts[0]
port = 53
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append((server, port))
except IOError:
pass
if not self._servers:
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append((server, 53))
except IOError:
pass
if not self._servers:
self._servers = [('8.8.4.4', 53), ('8.8.8.8', 53)]
logging.info('dns server: %s' % (self._servers,))
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
if b"#" in line:
line = line[:line.find(b'#')]
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unable to parse hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
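# If the preferred record type returned no address, retry once with the other address family before reporting failure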
if IPV6_CONNECTION_SUPPORT:
if not ip and self._hostname_status.get(hostname, STATUS_IPV4) \
== STATUS_IPV6:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV4:
for question in response.questions:
if question[1] == QTYPE_A:
self._call_callback(hostname, None)
break
else:
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr not in self._servers:
logging.warn('received a packet other than our dns')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, server)
def resolve(self, hostname, callback):
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
if False:
addrs = socket.getaddrinfo(hostname, 0, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if addrs:
af, socktype, proto, canonname, sa = addrs[0]
logging.debug('DNS resolve %s %s' % (hostname, sa[0]) )
self._cache[hostname] = sa[0]
callback((hostname, sa[0]), None)
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
if IPV6_CONNECTION_SUPPORT:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
if IPV6_CONNECTION_SUPPORT:
self._send_req(hostname, QTYPE_AAAA)
else:
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&[email protected]', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
[] |
[] |
[
"WINDIR"
] |
[]
|
["WINDIR"]
|
python
| 1 | 0 | |
pkg/pqdriver/postgresql_driver.go
|
package pqdriver
import (
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
)
const (
DefaultPort = "5432"
SSLModeVerifyFull = "verify-full"
SSLModeDisable = "disable"
SSLModeRequire = "require"
)
// PostgreSQLDriver is the interface
type PostgreSQLDriver interface {
Connect() *sqlx.DB
}
// Config is a model for connecting to PostgreSQL
type Config struct {
User string
Pass string
Host string
DatabaseName string
Port string
SSLMode string
MaxLifetime string
MaxIdleConns string
MaxOpenConns string
}
type postgresDB struct {
Conf Config
}
func (db *postgresDB) Connect() *sqlx.DB {
if db.Conf.SSLMode == "" {
db.Conf.SSLMode = SSLModeDisable
}
dsName := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
db.Conf.Host, db.Conf.Port, db.Conf.User, db.Conf.Pass, db.Conf.DatabaseName, db.Conf.SSLMode)
conn, err := sqlx.Connect("postgres", dsName)
if err != nil {
log.Fatalln(err)
}
log.Println("PostgreSQL Connected!")
maxOpenConns, _ := strconv.Atoi(db.Conf.MaxOpenConns)
maxIdleConns, _ := strconv.Atoi(db.Conf.MaxIdleConns)
maxLifetime, _ := strconv.Atoi(db.Conf.MaxLifetime)
if maxOpenConns > 0 {
conn.SetMaxOpenConns(maxOpenConns) // The default is 0 (unlimited), ex: 1000
}
if maxIdleConns > 0 {
conn.SetMaxIdleConns(maxIdleConns) // The default maxIdleConns = 2, ex: 10
}
// MaxLifetime is interpreted as seconds; 0 keeps connections open forever
conn.SetConnMaxLifetime(time.Duration(maxLifetime) * time.Second)
return conn
}
// New creates a PostgreSQL driver
func New(config Config) PostgreSQLDriver {
return &postgresDB{
Conf: config,
}
}
// ConfigEnv creates a Config from environment variables
func ConfigEnv() Config {
return Config{
User: os.Getenv("POSTGRES_USER"),
Pass: os.Getenv("POSTGRES_PASS"),
Host: os.Getenv("POSTGRES_HOST"),
DatabaseName: os.Getenv("POSTGRES_DATABASE"),
Port: os.Getenv("POSTGRES_PORT"),
// The default SSL mode is "disable", ex: "verify-full"
SSLMode: os.Getenv("POSTGRES_SSL_MODE"),
// The default maxLifetime = 0, Connections are reused forever, ex: "60"
MaxLifetime: os.Getenv("POSTGRES_MAX_LIFETIME"),
// The default maxIdleConns = 2, ex: 10
MaxIdleConns: os.Getenv("POSTGRES_MAX_IDLE_CONNS"),
// The default is 0 (unlimited), ex: 1000
MaxOpenConns: os.Getenv("POSTGRES_MAX_OPEN_CONNS"),
}
}
|
[
"\"POSTGRES_USER\"",
"\"POSTGRES_PASS\"",
"\"POSTGRES_HOST\"",
"\"POSTGRES_DATABASE\"",
"\"POSTGRES_PORT\"",
"\"POSTGRES_SSL_MODE\"",
"\"POSTGRES_MAX_LIFETIME\"",
"\"POSTGRES_MAX_IDLE_CONNS\"",
"\"POSTGRES_MAX_OPEN_CONNS\""
] |
[] |
[
"POSTGRES_SSL_MODE",
"POSTGRES_USER",
"POSTGRES_PASS",
"POSTGRES_MAX_LIFETIME",
"POSTGRES_HOST",
"POSTGRES_PORT",
"POSTGRES_MAX_IDLE_CONNS",
"POSTGRES_MAX_OPEN_CONNS",
"POSTGRES_DATABASE"
] |
[]
|
["POSTGRES_SSL_MODE", "POSTGRES_USER", "POSTGRES_PASS", "POSTGRES_MAX_LIFETIME", "POSTGRES_HOST", "POSTGRES_PORT", "POSTGRES_MAX_IDLE_CONNS", "POSTGRES_MAX_OPEN_CONNS", "POSTGRES_DATABASE"]
|
go
| 9 | 0 | |
src/bairdrus/discord.go
|
package main
// Program Name: discord.go
// Author Name: Jordan Edward Shea <[email protected]>
// Description: This is the main program for launching the Discord bot.
import (
"os"
"github.com/bwmarrin/discordgo"
"github.com/pkg/errors"
"log"
"fmt"
"os/signal"
"syscall"
)
func main() {
// Bot token should be stored securely as an environment variable
var fightBotToken = os.Getenv("fightBotToken")
if fightBotToken == "" {
log.Println("Error: fightBotToken was not found as an" +
" environment variable.")
return
}
// Attempts to create fight-bot using provided token
fightBot, err := discordgo.New("Bot " + fightBotToken)
if err != nil {
log.Println("Error creating the Discord session: ",
errors.WithStack(err))
return
}
// Error will be thrown if an invalid token was provided earlier
err = fightBot.Open()
if err != nil {
log.Println("Error with opening the connection: ",
errors.WithStack(err))
return
}
fmt.Println("Bot is now running. Press CTRL-C to exit.")
// Info on Go signals can be found at: https://gobyexample.com/signals
// sc will be notified of 4 types of signals (SIGINT, SIGTERM, Interrupt, Kill)
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Cleanly close the Discord session.
fightBot.Close()
}
|
[
"\"fightBotToken\""
] |
[] |
[
"fightBotToken"
] |
[]
|
["fightBotToken"]
|
go
| 1 | 0 | |
internal/conf/computed.go
|
package conf
import (
"context"
"log"
"os"
"strconv"
"strings"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf/confdefaults"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/schema"
)
func init() {
deployType := DeployType()
if !IsValidDeployType(deployType) {
log.Fatalf("The 'DEPLOY_TYPE' environment variable is invalid. Expected one of: %q, %q, %q, %q, %q. Got: %q", DeployKubernetes, DeployDockerCompose, DeployPureDocker, DeploySingleDocker, DeployDev, deployType)
}
confdefaults.Default = defaultConfigForDeployment()
}
func defaultConfigForDeployment() conftypes.RawUnified {
deployType := DeployType()
switch {
case IsDev(deployType):
return confdefaults.DevAndTesting
case IsDeployTypeSingleDockerContainer(deployType):
return confdefaults.DockerContainer
case IsDeployTypeKubernetes(deployType), IsDeployTypeDockerCompose(deployType), IsDeployTypePureDocker(deployType):
return confdefaults.KubernetesOrDockerComposeOrPureDocker
default:
panic("deploy type did not register default configuration")
}
}
func AWSCodeCommitConfigs(ctx context.Context) ([]*schema.AWSCodeCommitConnection, error) {
var config []*schema.AWSCodeCommitConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindAWSCodeCommit, &config); err != nil {
return nil, err
}
return config, nil
}
func BitbucketServerConfigs(ctx context.Context) ([]*schema.BitbucketServerConnection, error) {
var config []*schema.BitbucketServerConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindBitbucketServer, &config); err != nil {
return nil, err
}
return config, nil
}
func GitHubConfigs(ctx context.Context) ([]*schema.GitHubConnection, error) {
var config []*schema.GitHubConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindGitHub, &config); err != nil {
return nil, err
}
return config, nil
}
func GitLabConfigs(ctx context.Context) ([]*schema.GitLabConnection, error) {
var config []*schema.GitLabConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindGitLab, &config); err != nil {
return nil, err
}
return config, nil
}
func GitoliteConfigs(ctx context.Context) ([]*schema.GitoliteConnection, error) {
var config []*schema.GitoliteConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindGitolite, &config); err != nil {
return nil, err
}
return config, nil
}
func PhabricatorConfigs(ctx context.Context) ([]*schema.PhabricatorConnection, error) {
var config []*schema.PhabricatorConnection
if err := api.InternalClient.ExternalServiceConfigs(ctx, extsvc.KindPhabricator, &config); err != nil {
return nil, err
}
return config, nil
}
type AccessTokAllow string
const (
AccessTokensNone AccessTokAllow = "none"
AccessTokensAll AccessTokAllow = "all-users-create"
AccessTokensAdmin AccessTokAllow = "site-admin-create"
)
// AccessTokensAllow returns whether access tokens are enabled, disabled, or restricted to creation by admin users.
func AccessTokensAllow() AccessTokAllow {
cfg := Get().AuthAccessTokens
if cfg == nil {
return AccessTokensAll
}
switch cfg.Allow {
case "":
return AccessTokensAll
case string(AccessTokensAll):
return AccessTokensAll
case string(AccessTokensNone):
return AccessTokensNone
case string(AccessTokensAdmin):
return AccessTokensAdmin
default:
return AccessTokensNone
}
}
// EmailVerificationRequired returns whether users must verify an email address before they
// can perform most actions on this site.
//
// It's false for sites that do not have an email sending API key set up.
func EmailVerificationRequired() bool {
return Get().EmailSmtp != nil
}
// CanSendEmail returns whether the site can send emails (e.g., to reset a password or
// invite a user to an org).
//
// It's false for sites that do not have an email sending API key set up.
func CanSendEmail() bool {
return Get().EmailSmtp != nil
}
// Deploy type constants. Any changes here should be reflected in the DeployType type declared in web/src/globals.d.ts:
// https://sourcegraph.com/search?q=r:github.com/sourcegraph/sourcegraph%24+%22type+DeployType%22
const (
DeployKubernetes = "kubernetes"
DeploySingleDocker = "docker-container"
DeployDockerCompose = "docker-compose"
DeployPureDocker = "pure-docker"
DeployDev = "dev"
)
// DeployType tells the deployment type.
func DeployType() string {
if e := os.Getenv("DEPLOY_TYPE"); e != "" {
return e
}
// Default to Kubernetes cluster so that every Kubernetes cluster
// deployment doesn't need to be configured with DEPLOY_TYPE.
return DeployKubernetes
}
// IsDeployTypeKubernetes tells if the given deployment type is a Kubernetes
// cluster (and non-dev, not docker-compose, not pure-docker, and non-single Docker image).
func IsDeployTypeKubernetes(deployType string) bool {
switch deployType {
// includes older Kubernetes aliases for backwards compatibility
case "k8s", "cluster", DeployKubernetes:
return true
}
return false
}
// IsDeployTypeDockerCompose tells if the given deployment type is the Docker Compose
// deployment (and non-dev, not pure-docker, non-cluster, and non-single Docker image).
func IsDeployTypeDockerCompose(deployType string) bool {
return deployType == DeployDockerCompose
}
// IsDeployTypePureDocker tells if the given deployment type is the pure Docker
// deployment (and non-dev, not docker-compose, non-cluster, and non-single Docker image).
func IsDeployTypePureDocker(deployType string) bool {
return deployType == DeployPureDocker
}
// IsDeployTypeSingleDockerContainer tells if the given deployment type is Docker sourcegraph/server
// single-container (non-Kubernetes, not docker-compose, not pure-docker, non-cluster, non-dev).
func IsDeployTypeSingleDockerContainer(deployType string) bool {
return deployType == DeploySingleDocker
}
// IsDev tells if the given deployment type is "dev".
func IsDev(deployType string) bool {
return deployType == DeployDev
}
// IsValidDeployType returns true iff the given deployType is a Kubernetes deployment, a Docker Compose
// deployment, a pure Docker deployment, a single Docker container deployment, or a local development environment.
func IsValidDeployType(deployType string) bool {
return IsDeployTypeKubernetes(deployType) ||
IsDeployTypeDockerCompose(deployType) ||
IsDeployTypePureDocker(deployType) ||
IsDeployTypeSingleDockerContainer(deployType) ||
IsDev(deployType)
}
// UpdateChannel tells the update channel. Default is "release".
func UpdateChannel() string {
channel := Get().UpdateChannel
if channel == "" {
return "release"
}
return channel
}
// SearchIndexEnabled returns true if sourcegraph should index all
// repositories for text search. If the configuration is unset, it returns
// false for the docker server image (due to resource usage) but true
// elsewhere. It also checks for the deprecated environment variable
// INDEXED_SEARCH.
func SearchIndexEnabled() bool {
if v := Get().SearchIndexEnabled; v != nil {
return *v
}
if v := os.Getenv("INDEXED_SEARCH"); v != "" {
enabled, _ := strconv.ParseBool(v)
return enabled
}
return DeployType() != DeploySingleDocker
}
func CampaignsEnabled() bool {
if enabled := Get().CampaignsEnabled; enabled != nil {
return *enabled
}
return true
}
func ExternalURL() string {
return Get().ExternalURL
}
func UsingExternalURL() bool {
url := Get().ExternalURL
return !(url == "" || strings.HasPrefix(url, "http://localhost") || strings.HasPrefix(url, "https://localhost") || strings.HasPrefix(url, "http://127.0.0.1") || strings.HasPrefix(url, "https://127.0.0.1")) // CI:LOCALHOST_OK
}
func IsExternalURLSecure() bool {
return strings.HasPrefix(Get().ExternalURL, "https:")
}
func IsBuiltinSignupAllowed() bool {
provs := Get().AuthProviders
for _, prov := range provs {
if prov.Builtin != nil {
return prov.Builtin.AllowSignup
}
}
return false
}
// SearchSymbolsParallelism returns 20, or the site config
// "debug.search.symbolsParallelism" value if configured.
func SearchSymbolsParallelism() int {
val := Get().DebugSearchSymbolsParallelism
if val == 0 {
return 20
}
return val
}
func BitbucketServerPluginPerm() bool {
val := Get().ExperimentalFeatures.BitbucketServerFastPerm
return val == "enabled"
}
func EventLoggingEnabled() bool {
val := Get().ExperimentalFeatures.EventLogging
if val == "" {
return true
}
return val == "enabled"
}
func StructuralSearchEnabled() bool {
val := Get().ExperimentalFeatures.StructuralSearch
if val == "" {
return true
}
return val == "enabled"
}
func AndOrQueryEnabled() bool {
e := Get().ExperimentalFeatures
if e == nil || e.AndOrQuery == "" {
return true
}
return e.AndOrQuery == "enabled"
}
func ExperimentalFeatures() schema.ExperimentalFeatures {
val := Get().ExperimentalFeatures
if val == nil {
return schema.ExperimentalFeatures{}
}
return *val
}
// AuthMinPasswordLength returns the value of minimum password length requirement.
// If not set, it returns the default value 12.
func AuthMinPasswordLength() int {
val := Get().AuthMinPasswordLength
if val <= 0 {
return 12
}
return val
}
// By default, password reset links are valid for 4 hours.
const defaultPasswordLinkExpiry = 14400
// AuthPasswordResetLinkExpiry returns the time (in seconds) indicating how long password
// reset links are considered valid. If not set, it returns the default value.
func AuthPasswordResetLinkExpiry() int {
val := Get().AuthPasswordResetLinkExpiry
if val <= 0 {
return defaultPasswordLinkExpiry
}
return val
}
type ExternalServiceMode int
const (
ExternalServiceModeDisabled ExternalServiceMode = 0
ExternalServiceModePublic ExternalServiceMode = 1
ExternalServiceModeAll ExternalServiceMode = 2
)
// ExternalServiceUserMode returns the mode describing if users are allowed to add external services
// for public and private repositories.
func ExternalServiceUserMode() ExternalServiceMode {
switch Get().ExternalServiceUserMode {
case "public":
return ExternalServiceModePublic
case "all":
return ExternalServiceModeAll
default:
return ExternalServiceModeDisabled
}
}
|
[
"\"DEPLOY_TYPE\"",
"\"INDEXED_SEARCH\""
] |
[] |
[
"DEPLOY_TYPE",
"INDEXED_SEARCH"
] |
[]
|
["DEPLOY_TYPE", "INDEXED_SEARCH"]
|
go
| 2 | 0 | |
test/extended/util/test.go
|
package util
import (
"flag"
"fmt"
"os"
"path"
"regexp"
"strings"
"testing"
"github.com/golang/glog"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"github.com/onsi/gomega"
kapiv1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
kclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
e2e "k8s.io/kubernetes/test/e2e/framework"
authorizationclient "github.com/openshift/origin/pkg/authorization/generated/internalclientset"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
"github.com/openshift/origin/pkg/oc/admin/policy"
securityclient "github.com/openshift/origin/pkg/security/generated/internalclientset"
"github.com/openshift/origin/pkg/version"
testutil "github.com/openshift/origin/test/util"
)
var (
reportFileName string
syntheticSuite string
quiet bool
)
var TestContext *e2e.TestContextType = &e2e.TestContext
// init initialize the extended testing suite.
// You can set these environment variables to configure extended tests:
// KUBECONFIG - Path to kubeconfig containing embedded authinfo
// TEST_REPORT_DIR - If set, JUnit output will be written to this directory for each test
// TEST_REPORT_FILE_NAME - If set, will determine the name of the file that JUnit output is written to
func InitTest() {
// interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip`
ginkgo.BeforeEach(checkSyntheticInput)
e2e.RegisterCommonFlags()
e2e.RegisterClusterFlags()
flag.StringVar(&syntheticSuite, "suite", "", "DEPRECATED: Optional suite selector to filter which tests are run. Use focus.")
TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false"
TestContext.VerifyServiceAccount = true
TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT")
TestContext.KubeVolumeDir = os.Getenv("VOLUME_DIR")
if len(TestContext.KubeVolumeDir) == 0 {
TestContext.KubeVolumeDir = "/var/lib/origin/volumes"
}
TestContext.KubectlPath = "kubectl"
TestContext.KubeConfig = KubeConfigPath()
os.Setenv("KUBECONFIG", TestContext.KubeConfig)
// load and set the host variable for kubectl
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
cfg, err := clientConfig.ClientConfig()
if err != nil {
FatalErr(err)
}
TestContext.Host = cfg.Host
reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
if reportFileName == "" {
reportFileName = "junit"
}
quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
// Ensure that Kube tests run privileged (like they do upstream)
TestContext.CreateTestingNS = createTestingNS
glog.Infof("Extended test version %s", version.Get().String())
}
func ExecuteTest(t *testing.T, suite string) {
var r []ginkgo.Reporter
if dir := os.Getenv("TEST_REPORT_DIR"); len(dir) > 0 {
TestContext.ReportDir = dir
}
if TestContext.ReportDir != "" {
if err := os.MkdirAll(TestContext.ReportDir, 0755); err != nil {
glog.Errorf("Failed creating report directory: %v", err)
}
defer e2e.CoreDump(TestContext.ReportDir)
}
switch syntheticSuite {
case "parallel.conformance.openshift.io":
if len(config.GinkgoConfig.FocusString) > 0 {
config.GinkgoConfig.FocusString += "|"
}
config.GinkgoConfig.FocusString = "\\[Suite:openshift/conformance/parallel\\]"
case "serial.conformance.openshift.io":
if len(config.GinkgoConfig.FocusString) > 0 {
config.GinkgoConfig.FocusString += "|"
}
config.GinkgoConfig.FocusString = "\\[Suite:openshift/conformance/serial\\]"
}
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = "Skipped"
}
gomega.RegisterFailHandler(ginkgo.Fail)
if TestContext.ReportDir != "" {
r = append(r, reporters.NewJUnitReporter(path.Join(TestContext.ReportDir, fmt.Sprintf("%s_%02d.xml", reportFileName, config.GinkgoConfig.ParallelNode))))
}
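// Tag each test so ginkgo focus/skip strings can select the openshift conformance buckets (serial vs parallel) and the originating suite.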
ginkgo.WalkTests(func(name string, node types.TestNode) {
isSerial := serialTestsFilter.MatchString(name)
if isSerial {
if !strings.Contains(name, "[Serial]") {
node.SetText(node.Text() + " [Serial]")
}
}
if !excludedTestsFilter.MatchString(name) {
include := conformanceTestsFilter.MatchString(name)
switch {
case !include:
// do nothing
case isSerial:
node.SetText(node.Text() + " [Suite:openshift/conformance/serial]")
case include:
node.SetText(node.Text() + " [Suite:openshift/conformance/parallel]")
}
}
if strings.Contains(node.CodeLocation().FileName, "/origin/test/") && !strings.Contains(node.Text(), "[Suite:openshift") {
node.SetText(node.Text() + " [Suite:openshift]")
}
if strings.Contains(node.CodeLocation().FileName, "/kubernetes/test/e2e/") {
node.SetText(node.Text() + " [Suite:k8s]")
}
})
if quiet {
r = append(r, NewSimpleReporter())
ginkgo.RunSpecsWithCustomReporters(t, suite, r)
} else {
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r)
}
}
// TODO: Use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228 to implement this.
// isPackage determines whether the test is in a package. Ideally would be implemented in ginkgo.
func isPackage(pkg string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FileName, pkg)
}
// TODO: For both is*Test functions, use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228
func isOriginTest() bool {
return isPackage("/origin/test/")
}
func isKubernetesE2ETest() bool {
return isPackage("/kubernetes/test/e2e/")
}
func testNameContains(name string) bool {
return strings.Contains(ginkgo.CurrentGinkgoTestDescription().FullTestText, name)
}
func skipTestNamespaceCustomization() bool {
return (isPackage("/kubernetes/test/e2e/namespace.go") && (testNameContains("should always delete fast") || testNameContains("should delete fast enough")))
}
// Holds custom namespace creation functions so we can customize per-test
var customCreateTestingNSFuncs = map[string]e2e.CreateTestingNSFn{}
// Registers a namespace creation function for the given basename
// Fails if a create function is already registered
func setCreateTestingNSFunc(baseName string, fn e2e.CreateTestingNSFn) {
if _, exists := customCreateTestingNSFuncs[baseName]; exists {
FatalErr("Double registered custom namespace creation function for " + baseName)
}
customCreateTestingNSFuncs[baseName] = fn
}
// createTestingNS delegates to custom namespace creation functions if registered.
// otherwise, it ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs
func createTestingNS(baseName string, c kclientset.Interface, labels map[string]string) (*kapiv1.Namespace, error) {
// If a custom function exists, call it
if fn, exists := customCreateTestingNSFuncs[baseName]; exists {
return fn(baseName, c, labels)
}
// Otherwise use the upstream default
ns, err := e2e.CreateTestingNS(baseName, c, labels)
if err != nil {
return ns, err
}
// Add anyuid and privileged permissions for upstream tests
if isKubernetesE2ETest() && !skipTestNamespaceCustomization() {
clientConfig, err := testutil.GetClusterAdminClientConfig(KubeConfigPath())
if err != nil {
return ns, err
}
securityClient, err := securityclient.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
// add the "privileged" scc to ensure pods that explicitly
// request extra capabilities are not rejected
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "privileged")
// add the "anyuid" scc to ensure pods that don't specify a
// uid don't get forced into a range (mimics upstream
// behavior)
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "anyuid")
// add the "hostmount-anyuid" scc to ensure pods using hostPath
// can execute tests
addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "hostmount-anyuid")
// The intra-pod test requires that the service account have
// permission to retrieve service endpoints.
authorizationClient, err := authorizationclient.NewForConfig(clientConfig)
if err != nil {
return ns, err
}
addRoleToE2EServiceAccounts(authorizationClient, []kapiv1.Namespace{*ns}, bootstrappolicy.ViewRoleName)
// in practice too many kube tests ignore scheduling constraints
allowAllNodeScheduling(c, ns.Name)
}
return ns, err
}
var (
excludedTests = []string{
`\[Skipped\]`,
`\[Slow\]`,
`\[Flaky\]`,
`\[Disruptive\]`,
`\[local\]`,
// not enabled in Origin yet
//`\[Feature:GarbageCollector\]`,
// Doesn't work on scaled up clusters
`\[Feature:ImagePrune\]`,
`\[Feature:ImageMirror\]`,
// Quota isn't turned on by default, we should do that and then reenable these tests
`\[Feature:ImageQuota\]`,
// Currently disabled by default
`\[Feature:Initializers\]`,
// Needs special configuration
`\[Feature:Audit\]`,
// Depends on external components, may not need yet
`Monitoring`, // Not installed, should be
`Cluster level logging`, // Not installed yet
`Kibana`, // Not installed
`Ubernetes`, // Can't set zone labels today
`kube-ui`, // Not installed by default
`^Kubernetes Dashboard`, // Not installed by default (also probably slow image pull)
`\[Feature:Federation\]`, // Not enabled yet
`\[Feature:Federation12\]`, // Not enabled yet
`Ingress`, // Not enabled yet
`Cinder`, // requires an OpenStack cluster
`should support r/w`, // hostPath: This test expects that host's tmp dir is WRITABLE by a container. That isn't something we need to guarantee for openshift.
`should check that the kubernetes-dashboard instance is alive`, // we don't create this
// `\[Feature:ManualPerformance\]`, // requires /resetMetrics which we don't expose
// See the CanSupport implementation in upstream to determine whether these work.
`Ceph RBD`, // Works if ceph-common Binary installed (but we can't guarantee this on all clusters).
`GlusterFS`, // May work if /sbin/mount.glusterfs to be installed for plugin to work (also possibly blocked by serial pulling)
`should support r/w`, // hostPath: This test expects that host's tmp dir is WRITABLE by a container. That isn't something we need to guarantee for openshift.
// Failing because of https://github.com/openshift/origin/issues/12365 against a real cluster
//`should allow starting 95 pods per node`,
// Need fixing
`Horizontal pod autoscaling`, // needs heapster
//`PersistentVolume`, // https://github.com/openshift/origin/pull/6884 for recycler
`mount an API token into pods`, // We add 6 secrets, not 1
`ServiceAccounts should ensure a single API token exists`, // We create lots of secrets
`should test kube-proxy`, // needs 2 nodes
`authentication: OpenLDAP`, // needs separate setup and bucketing for openldap bootstrapping
`NFS`, // no permissions https://github.com/openshift/origin/pull/6884
`\[Feature:Example\]`, // has cleanup issues
`NodeProblemDetector`, // requires a non-master node to run on
//`unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs)
// Needs triage to determine why it is failing
`Addon update`, // TRIAGE
`SSH`, // TRIAGE
`\[Feature:Upgrade\]`, // TRIAGE
`SELinux relabeling`, // https://github.com/openshift/origin/issues/7287
`openshift mongodb replication creating from a template`, // flaking on deployment
//`Update Demo should do a rolling update of a replication controller`, // this is flaky and needs triaging
// Test will never work
`should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons
// Need to relax security restrictions
//`validates that InterPod Affinity and AntiAffinity is respected if matching`, // this *may* now be safe
// Requires too many pods per node for the per core defaults
//`should ensure that critical pod is scheduled in case there is no resources available`,
// Need multiple nodes
`validates that InterPodAntiAffinity is respected if matching 2`,
// Inordinately slow tests
`should create and stop a working application`,
//`should always delete fast`, // will be uncommented in etcd3
// We don't install KubeDNS
`should check if Kubernetes master services is included in cluster-info`,
// this tests dns federation configuration via configmap, which we don't support yet
`DNS configMap`,
// this tests the _kube_ downgrade. we don't support that.
`\[Feature:Downgrade\]`,
// upstream flakes
`validates resource limits of pods that are allowed to run`, // can't schedule to master due to node label limits, also fiddly
// TODO undisable:
`should provide basic identity`, // needs a persistent volume provisioner in single node, host path not working
`should idle the service and DeploymentConfig properly`, // idling with a single service and DeploymentConfig [Conformance]
// slow as sin and twice as ugly (11m each)
"Pod should avoid to schedule to node that have avoidPod annotation",
"Pod should be schedule to node that satisify the PodAffinity",
"Pod should be prefer scheduled to node that satisify the NodeAffinity",
}
excludedTestsFilter = regexp.MustCompile(strings.Join(excludedTests, `|`))
// The list of tests to run for the OpenShift conformance suite. Any test
// in this group which cannot be run in parallel must be identified with the
// [Serial] tag or added to the serialTests filter.
conformanceTests = []string{}
conformanceTestsFilter = regexp.MustCompile(strings.Join(conformanceTests, `|`))
// Identifies any tests that by nature must be run in isolation. Every test in this
// category will be given the [Serial] tag if it does not already have it.
serialTests = []string{
`\[Serial\]`,
`\[Disruptive\]`,
`\[Feature:ManualPerformance\]`, // requires isolation
`\[Feature:HighDensityPerformance\]`, // requires no other namespaces
`Service endpoints latency`, // requires low latency
`Clean up pods on node`, // schedules up to max pods per node
`should allow starting 95 pods per node`,
}
serialTestsFilter = regexp.MustCompile(strings.Join(serialTests, `|`))
)
// checkSyntheticInput selects tests based on synthetic skips or focuses
func checkSyntheticInput() {
checkSuiteSkips()
}
// checkSuiteSkips ensures Origin/Kubernetes synthetic skip labels are applied
// DEPRECATED: remove in a future release
func checkSuiteSkips() {
switch {
case isOriginTest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Origin") {
ginkgo.Skip("skipping all openshift/origin tests")
}
case isKubernetesE2ETest():
if strings.Contains(config.GinkgoConfig.SkipString, "Synthetic Kubernetes") {
ginkgo.Skip("skipping all k8s.io/kubernetes tests")
}
}
}
var longRetry = wait.Backoff{Steps: 100}
// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto.
func allowAllNodeScheduling(c kclientset.Interface, namespace string) {
err := retry.RetryOnConflict(longRetry, func() error {
ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
ns.Annotations["openshift.io/node-selector"] = ""
_, err = c.CoreV1().Namespaces().Update(ns)
return err
})
if err != nil {
FatalErr(err)
}
}
func addE2EServiceAccountsToSCC(securityClient securityclient.Interface, namespaces []kapiv1.Namespace, sccName string) {
// Because updates can race, we need to set the backoff retries to be > than the number of possible
// parallel jobs starting at once. Set very high to allow future high parallelism.
err := retry.RetryOnConflict(longRetry, func() error {
scc, err := securityClient.Security().SecurityContextConstraints().Get(sccName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
return nil
}
return err
}
for _, ns := range namespaces {
if strings.HasPrefix(ns.Name, "e2e-") {
scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
}
}
if _, err := securityClient.Security().SecurityContextConstraints().Update(scc); err != nil {
return err
}
return nil
})
if err != nil {
FatalErr(err)
}
}
func addRoleToE2EServiceAccounts(c authorizationclient.Interface, namespaces []kapiv1.Namespace, roleName string) {
err := retry.RetryOnConflict(longRetry, func() error {
for _, ns := range namespaces {
if strings.HasPrefix(ns.Name, "e2e-") && ns.Status.Phase != kapiv1.NamespaceTerminating {
sa := fmt.Sprintf("system:serviceaccount:%s:default", ns.Name)
addRole := &policy.RoleModificationOptions{
RoleNamespace: "",
RoleName: roleName,
RoleBindingAccessor: policy.NewLocalRoleBindingAccessor(ns.Name, c.Authorization()),
Users: []string{sa},
}
if err := addRole.AddRole(); err != nil {
e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
}
}
}
return nil
})
if err != nil {
FatalErr(err)
}
}
|
[
"\"DELETE_NAMESPACE\"",
"\"KUBE_REPO_ROOT\"",
"\"VOLUME_DIR\"",
"\"TEST_REPORT_FILE_NAME\"",
"\"TEST_OUTPUT_QUIET\"",
"\"TEST_REPORT_DIR\""
] |
[] |
[
"DELETE_NAMESPACE",
"TEST_REPORT_DIR",
"TEST_REPORT_FILE_NAME",
"TEST_OUTPUT_QUIET",
"VOLUME_DIR",
"KUBE_REPO_ROOT"
] |
[]
|
["DELETE_NAMESPACE", "TEST_REPORT_DIR", "TEST_REPORT_FILE_NAME", "TEST_OUTPUT_QUIET", "VOLUME_DIR", "KUBE_REPO_ROOT"]
|
go
| 6 | 0 | |
elastalert/util.py
|
# -*- coding: utf-8 -*-
import collections
import datetime
import logging
import os
import re
import sys
import dateutil.parser
import pytz
from six import string_types
from . import ElasticSearchClient
from .auth import Auth
logging.basicConfig()
elastalert_logger = logging.getLogger('elastalert')
def get_module(module_name):
""" Loads a module and returns a specific object.
module_name should be 'module.file.object'.
Returns object or raises EAException on error. """
sys.path.append(os.getcwd())
try:
module_path, module_class = module_name.rsplit('.', 1)
base_module = __import__(module_path, globals(), locals(), [module_class])
module = getattr(base_module, module_class)
except (ImportError, AttributeError, ValueError) as e:
raise EAException("Could not import module %s: %s" % (module_name, e)).with_traceback(sys.exc_info()[2])
return module
def new_get_event_ts(ts_field):
""" Constructs a lambda that may be called to extract the timestamp field
from a given event.
:returns: A callable function that takes an event and outputs that event's
timestamp field.
"""
return lambda event: lookup_es_key(event[0], ts_field)
def _find_es_dict_by_key(lookup_dict, term):
""" Performs iterative dictionary search based upon the following conditions:
1. Subkeys may either appear behind a full stop (.) or at one lookup_dict level lower in the tree.
2. No wildcards exist within the provided ES search terms (these are treated as string literals)
This is necessary to get around inconsistencies in ES data.
For example:
{'ad.account_name': 'bob'}
Or:
{'csp_report': {'blocked_uri': 'bob.com'}}
And even:
{'juniper_duo.geoip': {'country_name': 'Democratic People's Republic of Korea'}}
We want a search term of form "key.subkey.subsubkey" to match in all cases.
:returns: A tuple with the first element being the dict that contains the key and the second
element which is the last subkey used to access the target specified by the term. None is
returned for both if the key can not be found.
"""
if term in lookup_dict:
return lookup_dict, term
# If the term does not match immediately, perform iterative lookup:
# 1. Split the search term into tokens
# 2. Recurrently concatenate these together to traverse deeper into the dictionary,
# clearing the subkey at every successful lookup.
#
# This greedy approach is correct because subkeys must always appear in order,
# preferring full stops and traversal interchangeably.
#
# Subkeys will NEVER be duplicated between an alias and a traversal.
#
# For example:
# {'foo.bar': {'bar': 'ray'}} to look up foo.bar will return {'bar': 'ray'}, not 'ray'
dict_cursor = lookup_dict
while term:
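# split off an optional list index, e.g. 'ips[0]' -> ('ips', '0', '')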
split_results = re.split(r'\[(\d)\]', term, maxsplit=1)
if len(split_results) == 3:
sub_term, index, term = split_results
index = int(index)
else:
sub_term, index, term = split_results + [None, '']
subkeys = sub_term.split('.')
subkey = ''
while len(subkeys) > 0:
if not dict_cursor:
return {}, None
subkey += subkeys.pop(0)
if subkey in dict_cursor:
if len(subkeys) == 0:
break
dict_cursor = dict_cursor[subkey]
subkey = ''
elif len(subkeys) == 0:
# If there are no keys left to match, return None values
dict_cursor = None
subkey = None
else:
subkey += '.'
if index is not None and subkey:
dict_cursor = dict_cursor[subkey]
if type(dict_cursor) == list and len(dict_cursor) > index:
subkey = index
if term:
dict_cursor = dict_cursor[subkey]
else:
return {}, None
return dict_cursor, subkey
def set_es_key(lookup_dict, term, value):
""" Looks up the location that the term maps to and sets it to the given value.
:returns: True if the value was set successfully, False otherwise.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
if value_dict is not None:
value_dict[value_key] = value
return True
return False
def lookup_es_key(lookup_dict, term):
""" Performs iterative dictionary search for the given term.
:returns: The value identified by term or None if it cannot be found.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
return None if value_key is None else value_dict[value_key]
def ts_to_dt(timestamp):
if isinstance(timestamp, datetime.datetime):
return timestamp
dt = dateutil.parser.parse(timestamp)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=pytz.utc)
return dt
def dt_to_ts(dt):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.isoformat()
# Round microseconds to milliseconds
if dt.tzinfo is None:
# Implicitly convert local times to UTC
return ts + 'Z'
# isoformat() uses microsecond accuracy and timezone offsets
# but we should try to use millisecond accuracy and Z to indicate UTC
return ts.replace('000+00:00', 'Z').replace('+00:00', 'Z')
def ts_to_dt_with_format(timestamp, ts_format):
if isinstance(timestamp, datetime.datetime):
return timestamp
dt = datetime.datetime.strptime(timestamp, ts_format)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_ts_with_format(dt, ts_format):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.strftime(ts_format)
return ts
def ts_now():
return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
def inc_ts(timestamp, milliseconds=1):
"""Increment a timestamp by milliseconds."""
dt = ts_to_dt(timestamp)
dt += datetime.timedelta(milliseconds=milliseconds)
return dt_to_ts(dt)
def pretty_ts(timestamp, tz=True):
"""Pretty-format the given timestamp (to be printed or logged hereafter).
If tz, the timestamp will be converted to local time.
Format: YYYY-MM-DD HH:MM TZ"""
dt = timestamp
if not isinstance(timestamp, datetime.datetime):
dt = ts_to_dt(timestamp)
if tz:
dt = dt.astimezone(dateutil.tz.tzlocal())
return dt.strftime('%Y-%m-%d %H:%M %Z')
def ts_add(ts, td):
""" Allows a timedelta (td) add operation on a string timestamp (ts) """
return dt_to_ts(ts_to_dt(ts) + td)
def hashable(obj):
""" Convert obj to a hashable obj.
We use the value of some fields from Elasticsearch as keys for dictionaries. This means
that whatever Elasticsearch returns must be hashable, and it sometimes returns a list or dict."""
if not obj.__hash__:
return str(obj)
return obj
def format_index(index, start, end, add_extra=False):
""" Takes an index, specified using strftime format, start and end time timestamps,
and outputs a wildcard based index string to match all possible timestamps. """
# Convert to UTC
start -= start.utcoffset()
end -= end.utcoffset()
original_start = start
indices = set()
while start.date() <= end.date():
indices.add(start.strftime(index))
start += datetime.timedelta(days=1)
num = len(indices)
if add_extra:
while len(indices) == num:
original_start -= datetime.timedelta(days=1)
new_index = original_start.strftime(index)
assert new_index != index, "You cannot use a static index with search_extra_index"
indices.add(new_index)
return ','.join(indices)
class EAException(Exception):
pass
def seconds(td):
return td.seconds + td.days * 24 * 3600
def total_seconds(dt):
# For python 2.6 compatibility
if dt is None:
return 0
elif hasattr(dt, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
def dt_to_int(dt):
dt = dt.replace(tzinfo=None)
return int(total_seconds((dt - datetime.datetime.utcfromtimestamp(0))) * 1000)
def unixms_to_dt(ts):
return unix_to_dt(float(ts) / 1000)
def unix_to_dt(ts):
dt = datetime.datetime.utcfromtimestamp(float(ts))
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_unix(dt):
return int(total_seconds(dt - datetime.datetime(1970, 1, 1, tzinfo=dateutil.tz.tzutc())))
def dt_to_unixms(dt):
return int(dt_to_unix(dt) * 1000)
def cronite_datetime_to_timestamp(self, d):
"""
Converts a `datetime` object `d` into a UNIX timestamp.
"""
if d.tzinfo is not None:
d = d.replace(tzinfo=None) - d.utcoffset()
return total_seconds((d - datetime.datetime(1970, 1, 1)))
def add_raw_postfix(field, is_five_or_above):
if is_five_or_above:
end = '.keyword'
else:
end = '.raw'
if not field.endswith(end):
field += end
return field
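# Illustrative: add_raw_postfix('hostname', True) returns 'hostname.keyword' (ES 5+),
# while add_raw_postfix('hostname', False) returns 'hostname.raw'.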
def replace_dots_in_field_names(document):
""" This method destructively modifies document by replacing any dots in
field names with an underscore. """
for key, value in list(document.items()):
if isinstance(value, dict):
value = replace_dots_in_field_names(value)
if isinstance(key, string_types) and key.find('.') != -1:
del document[key]
document[key.replace('.', '_')] = value
return document
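# Illustrative (hypothetical field names): replace_dots_in_field_names({'user.name': {'first.last': 1}})
# returns {'user_name': {'first_last': 1}}; note the input dict is modified in place.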
def elasticsearch_client(conf):
""" returns an :class:`ElasticSearchClient` instance configured using an es_conn_config """
es_conn_conf = build_es_conn_config(conf)
auth = Auth()
es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'],
username=es_conn_conf['es_username'],
password=es_conn_conf['es_password'],
aws_region=es_conn_conf['aws_region'],
profile_name=es_conn_conf['profile'])
return ElasticSearchClient(es_conn_conf)
def build_es_conn_config(conf):
""" Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port'
'es_username' and 'es_password', this will return a new dictionary
with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which
will be a basicauth username:password formatted string """
parsed_conf = {}
parsed_conf['use_ssl'] = os.environ.get('ES_USE_SSL', False)
parsed_conf['verify_certs'] = True
parsed_conf['ca_certs'] = None
parsed_conf['client_cert'] = None
parsed_conf['client_key'] = None
parsed_conf['http_auth'] = None
parsed_conf['es_username'] = None
parsed_conf['es_password'] = None
parsed_conf['aws_region'] = None
parsed_conf['profile'] = None
parsed_conf['es_host'] = os.environ.get('ES_HOST', conf['es_host'])
parsed_conf['es_port'] = int(os.environ.get('ES_PORT', conf['es_port']))
parsed_conf['es_url_prefix'] = ''
parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20)
parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET')
if os.environ.get('ES_USERNAME'):
parsed_conf['es_username'] = os.environ.get('ES_USERNAME')
parsed_conf['es_password'] = os.environ.get('ES_PASSWORD')
elif 'es_username' in conf:
parsed_conf['es_username'] = conf['es_username']
parsed_conf['es_password'] = conf['es_password']
if os.environ.get('ES_SINGLE_INDEX'):
parsed_conf['run_on_single_index'] = True if os.environ.get('ES_SINGLE_INDEX') == "True" else False
elif 'run_on_single_index' in conf:
parsed_conf['run_on_single_index'] = conf['run_on_single_index']
else:
parsed_conf['run_on_single_index'] = False
if 'aws_region' in conf:
parsed_conf['aws_region'] = conf['aws_region']
# Deprecated
if 'boto_profile' in conf:
logging.warning('Found deprecated "boto_profile", use "profile" instead!')
parsed_conf['profile'] = conf['boto_profile']
if 'profile' in conf:
parsed_conf['profile'] = conf['profile']
if 'use_ssl' in conf:
parsed_conf['use_ssl'] = conf['use_ssl']
if 'verify_certs' in conf:
parsed_conf['verify_certs'] = conf['verify_certs']
if 'ca_certs' in conf:
parsed_conf['ca_certs'] = conf['ca_certs']
if 'client_cert' in conf:
parsed_conf['client_cert'] = conf['client_cert']
if 'client_key' in conf:
parsed_conf['client_key'] = conf['client_key']
if 'es_url_prefix' in conf:
parsed_conf['es_url_prefix'] = conf['es_url_prefix']
return parsed_conf
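# Illustrative usage (hypothetical values): a minimal conf such as
#   {'es_host': 'localhost', 'es_port': 9200, 'es_username': 'elastic', 'es_password': 'changeme'}
# yields a dict with the SSL/cert fields filled with defaults and the credentials copied over;
# ES_HOST, ES_PORT, ES_USERNAME, ES_PASSWORD and related environment variables, when set,
# take precedence over the values in conf.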
def pytzfy(dt):
# apscheduler requires pytz timezone objects
# This function will replace a dateutil.tz one with a pytz one
if dt.tzinfo is not None:
new_tz = pytz.timezone(dt.tzinfo.tzname('Y is this even required??'))
return dt.replace(tzinfo=new_tz)
return dt
def parse_duration(value):
"""Convert ``unit=num`` spec into a ``timedelta`` object."""
unit, num = value.split('=')
return datetime.timedelta(**{unit: int(num)})
def parse_deadline(value):
"""Convert ``unit=num`` spec into a ``datetime`` object."""
duration = parse_duration(value)
return ts_now() + duration
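# Illustrative usage (hypothetical specs):
#   parse_duration('minutes=5')  -> a timedelta of 5 minutes
#   parse_deadline('hours=2')    -> ts_now() plus a timedelta of 2 hours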
def flatten_dict(dct, delim='.', prefix=''):
ret = {}
for key, val in list(dct.items()):
if type(val) == dict:
ret.update(flatten_dict(val, prefix=prefix + key + delim))
else:
ret[prefix + key] = val
return ret
def resolve_string(string, match, missing_text='<MISSING VALUE>'):
"""
Given a python string that may contain references to fields on the match dictionary,
the strings are replaced using the corresponding values.
However, if the referenced field is not found on the dictionary,
it is replaced by a default string.
Strings can be formatted using the old-style format ('%(field)s') or
the new-style format ('{match[field]}').
:param string: A string that may contain references to values of the 'match' dictionary.
:param match: A dictionary with the values to replace where referenced by keys in the string.
:param missing_text: The default text to replace a formatter with if the field doesnt exist.
"""
flat_match = flatten_dict(match)
flat_match.update(match)
dd_match = collections.defaultdict(lambda: missing_text, flat_match)
dd_match['_missing_value'] = missing_text
while True:
try:
string = string % dd_match
string = string.format(**dd_match)
break
except (KeyError, ValueError) as e:
if '{%s}' % str(e).strip("'") not in string:
break
string = string.replace('{%s}' % str(e).strip("'"), '{_missing_value}')
return string
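# Illustrative usage (hypothetical match data): with match = {'username': 'alice'},
#   resolve_string('%(username)s logged in', match)  -> 'alice logged in'
#   resolve_string('{username} logged in', match)    -> 'alice logged in'
# References to fields missing from the match fall back to missing_text ('<MISSING VALUE>').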
def should_scrolling_continue(rule_conf):
"""
Tells about a rule config if it can scroll still or should stop the scrolling.
:param: rule_conf as dict
:rtype: bool
"""
max_scrolling = rule_conf.get('max_scrolling_count')
stop_the_scroll = 0 < max_scrolling <= rule_conf.get('scrolling_cycle')
return not stop_the_scroll
|
[] |
[] |
[
"ES_USE_SSL",
"ES_HOST",
"ES_USERNAME",
"ES_PASSWORD",
"ES_PORT",
"ES_SINGLE_INDEX"
] |
[]
|
["ES_USE_SSL", "ES_HOST", "ES_USERNAME", "ES_PASSWORD", "ES_PORT", "ES_SINGLE_INDEX"]
|
python
| 6 | 0 | |
project/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opencvFaceRec.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
dashboard.go
|
package gapi
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
)
type DashboardMeta struct {
IsStarred bool `json:"isStarred"`
Slug string `json:"slug"`
Folder int64 `json:"folderId"`
}
type DashboardSaveResponse struct {
Slug string `json:"slug"`
Id int64 `json:"id"`
Uid string `json:"uid"`
Status string `json:"status"`
Version int64 `json:"version"`
}
type Dashboard struct {
Meta DashboardMeta `json:"meta"`
Model map[string]interface{} `json:"dashboard"`
Folder int64 `json:"folderId"`
	Overwrite bool                   `json:"overwrite"`
}
// Deprecated: use NewDashboard instead
func (c *Client) SaveDashboard(model map[string]interface{}, overwrite bool) (*DashboardSaveResponse, error) {
wrapper := map[string]interface{}{
"dashboard": model,
"overwrite": overwrite,
}
data, err := json.Marshal(wrapper)
if err != nil {
return nil, err
}
req, err := c.newRequest("POST", "/api/dashboards/db", nil, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
resp, err := c.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
data, _ = ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf("status: %d, body: %s", resp.StatusCode, data)
}
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
result := &DashboardSaveResponse{}
err = json.Unmarshal(data, &result)
return result, err
}
func (c *Client) NewDashboard(dashboard Dashboard, orgID int64) (*DashboardSaveResponse, error) {
data, err := json.Marshal(dashboard)
if err != nil {
return nil, err
}
req, err := c.newRequest("POST", "/api/dashboards/db", nil, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
req.Header.Set("X-Grafana-Org-Id", strconv.FormatInt(orgID, 10))
resp, err := c.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, errors.New(resp.Status)
}
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
result := &DashboardSaveResponse{}
err = json.Unmarshal(data, &result)
return result, err
}
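// Illustrative usage (hypothetical variables, not part of this file): save a dashboard
// model into the default folder of org 1, overwriting any dashboard with the same slug/uid:
//
//	resp, err := client.NewDashboard(Dashboard{Model: model, Overwrite: true}, 1)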
func (c *Client) Dashboard(slug string, orgID int64) (*Dashboard, error) {
path := fmt.Sprintf("/api/dashboards/db/%s", slug)
req, err := c.newRequest("GET", path, nil, nil)
if err != nil {
return nil, err
}
req.Header.Set("X-Grafana-Org-Id", strconv.FormatInt(orgID, 10))
resp, err := c.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, errors.New(resp.Status)
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
result := &Dashboard{}
err = json.Unmarshal(data, &result)
result.Folder = result.Meta.Folder
if os.Getenv("GF_LOG") != "" {
log.Printf("got back dashboard response %s", data)
}
return result, err
}
func (c *Client) DeleteDashboard(slug string, orgID int64) error {
path := fmt.Sprintf("/api/dashboards/db/%s", slug)
req, err := c.newRequest("DELETE", path, nil, nil)
if err != nil {
return err
}
req.Header.Set("X-Grafana-Org-Id", strconv.FormatInt(orgID, 10))
resp, err := c.Do(req)
if err != nil {
return err
}
if resp.StatusCode != 200 {
return errors.New(resp.Status)
}
return nil
}
|
[
"\"GF_LOG\""
] |
[] |
[
"GF_LOG"
] |
[]
|
["GF_LOG"]
|
go
| 1 | 0 | |
cluster_vilbert.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
import random
from io import open
import numpy as np
import math
from tensorboardX import SummaryWriter
from tqdm import tqdm
from bisect import bisect
import yaml
from easydict import EasyDict as edict
import pdb
import sys
import torch
import torch.nn.functional as F
import torch.nn as nn
from vilbert.datasets.discourse_relation_dataset import DiscourseRelationDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, Dataset, RandomSampler
from pytorch_transformers.optimization import (
AdamW,
WarmupConstantSchedule,
WarmupLinearSchedule,
)
from vilbert.optimization import RAdam
from vilbert.task_utils import (
LoadDatasets,
LoadLosses,
ForwardModelsTrain,
ForwardModelsVal,
)
# from torch.optim.lr_scheduler import (
# LambdaLR,
# ReduceLROnPlateau,
# CosineAnnealingLR,
# CosineAnnealingWarmRestarts,
# )
import vilbert.utils as utils
import torch.distributed as dist
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
from sklearn.cluster import MiniBatchKMeans
from collections import defaultdict
import numpy as np
def main():
# os.environ['CUDA_VISIBLE_DEVICES'] = "0,1"
batch_size = 64
parser = argparse.ArgumentParser()
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--from_pretrained",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--output_dir",
default="save",
type=str,
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--config_file",
default="config/bert_base_6layer_6conect.json",
type=str,
help="The config file which specified the model details.",
)
parser.add_argument(
"--num_train_epochs",
default=20,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--train_iter_multiplier",
default=1.0,
type=float,
help="multiplier for the multi-task training.",
)
parser.add_argument(
"--train_iter_gap",
default=4,
type=int,
help="forward every n iteration is the validation score is not improving over the last 3 epoch, -1 means will stop",
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--do_lower_case",
default=True,
type=bool,
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--seed", type=int, default=0, help="random seed for initialization"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit float precision instead of 32-bit",
)
parser.add_argument(
"--loss_scale",
type=float,
default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n",
)
parser.add_argument(
"--num_workers",
type=int,
default=16,
help="Number of workers in the dataloader.",
)
parser.add_argument(
"--save_name", default="", type=str, help="save name for training."
)
parser.add_argument(
"--in_memory",
default=False,
type=bool,
help="whether use chunck for parallel training.",
)
parser.add_argument(
"--optim", default="AdamW", type=str, help="what to use for the optimization."
)
parser.add_argument(
"--tasks", default="0", type=str, help="discourse : TASK0"
)
parser.add_argument(
"--freeze",
default=-1,
type=int,
help="till which layer of textual stream of vilbert need to fixed.",
)
parser.add_argument(
"--vision_scratch",
action="store_true",
help="whether pre-trained the image or not.",
)
parser.add_argument(
"--evaluation_interval", default=1, type=int, help="evaluate very n epoch."
)
parser.add_argument(
"--lr_scheduler",
default="mannul",
type=str,
help="whether use learning rate scheduler.",
)
parser.add_argument(
"--baseline", action="store_true", help="whether use single stream baseline."
)
parser.add_argument(
"--resume_file", default="", type=str, help="Resume from checkpoint"
)
parser.add_argument(
"--dynamic_attention",
action="store_true",
help="whether use dynamic attention.",
)
parser.add_argument(
"--clean_train_sets",
default=True,
type=bool,
help="whether clean train sets for multitask data.",
)
parser.add_argument(
"--visual_target",
default=0,
type=int,
help="which target to use for visual branch. \
0: soft label, \
1: regress the feature, \
2: NCE loss.",
)
parser.add_argument(
"--task_specific_tokens",
action="store_true",
default=False,
help="whether to use task specific tokens for the multi-task learning.",
)
# todo
args = parser.parse_args()
with open("vilbert_tasks.yml", "r") as f:
task_cfg = edict(yaml.safe_load(f))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if args.baseline:
from pytorch_transformers.modeling_bert import BertConfig
from vilbert.basebert import BaseBertForVLTasks
else:
from vilbert.vilbert import BertConfig
from vilbert.vilbert import VILBertForVLTasks
task_names = []
task_lr = []
task_id = 1
for i, task_id in enumerate(args.tasks.split("-")):
task_id = str(1)
task = "TASK" + task_id
name = task_cfg[task]["name"]
task_names.append(name)
task_lr.append(task_cfg[task]["lr"])
base_lr = min(task_lr)
loss_scale = {}
for i, task_id in enumerate(args.tasks.split("-")):
task = "TASK" + task_id
loss_scale[task] = task_lr[i] / base_lr
if args.save_name:
prefix = "-" + args.save_name
else:
prefix = ""
timeStamp = (
"-".join("discourse")
+ "_"
+ args.config_file.split("/")[1].split(".")[0]
+ prefix
)
savePath = os.path.join(args.output_dir, timeStamp)
bert_weight_name = json.load(
open("config/" + args.bert_model + "_weight_name.json", "r")
)
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 3
torch.distributed.init_process_group(backend="nccl")
logger.info(
"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16
)
)
default_gpu = False
if dist.is_available() and args.local_rank != -1:
rank = dist.get_rank()
if rank == 0:
default_gpu = True
else:
default_gpu = True
if default_gpu:
if not os.path.exists(savePath):
os.makedirs(savePath)
config = BertConfig.from_json_file(args.config_file)
if default_gpu:
# save all the hidden parameters.
with open(os.path.join(savePath, "command.txt"), "w") as f:
print(args, file=f) # Python 3.x
print("\n", file=f)
print(config, file=f)
# task_batch_size, task_num_iters, task_ids, task_datasets_train, task_datasets_val, task_dataloader_train, task_dataloader_val = LoadDatasets(
# args, task_cfg, args.tasks.split("-"),'train'
# )
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case
)
labels = ["Visible", 'Subjective', 'Action', 'Story', 'Meta', 'Irrelevant', 'Other']
train_dataset = DiscourseRelationDataset(
labels,
task_cfg[task]["dataroot"],
tokenizer,
args.bert_model,
task_cfg[task]["max_seq_length"],
encoding="utf-8",
visual_target=0,
batch_size=batch_size,
shuffle=False,
num_workers=4,
cache=5000,
drop_last=False,
cuda=False,
objective=0,
visualization=False,
)
train_sampler = RandomSampler(train_dataset)
train_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size= batch_size,
num_workers=0,
pin_memory=True,
)
# for i in train_loader:
# print("hello")
# todo task_ids , task_num_tiers
task_ids = ['TASK0']
task_num_iters = [100]
task_batch_size = task_cfg['TASK0']["batch_size"]
print("task_batch_size")
print(task_batch_size)
logdir = os.path.join(savePath, "logs")
tbLogger = utils.tbLogger(
logdir,
savePath,
task_names,
task_ids,
task_num_iters,
args.gradient_accumulation_steps,
)
if args.visual_target == 0:
config.v_target_size = 1601
config.visual_target = args.visual_target
else:
config.v_target_size = 2048
config.visual_target = args.visual_target
if args.task_specific_tokens:
print("*********** config.task_specific_tokens = True ************")
config.task_specific_tokens = True
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
num_train_optimization_steps = 10
num_labels = len(labels)
if args.dynamic_attention:
config.dynamic_attention = True
if "roberta" in args.bert_model:
config.model = "roberta"
if args.baseline:
model = BaseBertForVLTasks.from_pretrained(
args.from_pretrained,
config=config,
num_labels=num_labels,
default_gpu=default_gpu,
)
else:
model = VILBertForVLTasks.from_pretrained(
args.from_pretrained,
config=config,
num_labels=num_labels,
default_gpu=default_gpu,
)
model.double()
model = model.to(device)
task_losses = LoadLosses(args, task_cfg, args.tasks.split("-"))
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
if args.freeze != -1:
bert_weight_name_filtered = []
for name in bert_weight_name:
if "embeddings" in name:
bert_weight_name_filtered.append(name)
elif "encoder" in name:
layer_num = name.split(".")[2]
if int(layer_num) <= args.freeze:
bert_weight_name_filtered.append(name)
optimizer_grouped_parameters = []
for key, value in dict(model.named_parameters()).items():
if key[12:] in bert_weight_name_filtered:
value.requires_grad = False
if default_gpu:
print("filtered weight")
print(bert_weight_name_filtered)
optimizer_grouped_parameters = []
for key, value in dict(model.named_parameters()).items():
if value.requires_grad:
if "vil_" in key:
lr = 1e-4
else:
if args.vision_scratch:
if key[12:] in bert_weight_name:
lr = base_lr
else:
lr = 1e-4
else:
lr = base_lr
if any(nd in key for nd in no_decay):
optimizer_grouped_parameters += [
{"params": [value], "lr": lr, "weight_decay": 0.0}
]
if not any(nd in key for nd in no_decay):
optimizer_grouped_parameters += [
{"params": [value], "lr": lr, "weight_decay": 0.01}
]
if default_gpu:
print(len(list(model.named_parameters())), len(optimizer_grouped_parameters))
if args.optim == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=base_lr, correct_bias=False, weight_decay=1e-4)
elif args.optim == "RAdam":
optimizer = RAdam(optimizer_grouped_parameters, lr=base_lr, weight_decay=1e-4)
startIterID = 0
global_step = 0
start_epoch = 0
if args.resume_file != "" and os.path.exists(args.resume_file):
checkpoint = torch.load(args.resume_file, map_location="cpu")
new_dict = {}
for attr in checkpoint["model_state_dict"]:
if attr.startswith("module."):
new_dict[attr.replace("module.", "", 1)] = checkpoint[
"model_state_dict"
][attr]
else:
new_dict[attr] = checkpoint["model_state_dict"][attr]
model.load_state_dict(new_dict)
# warmup_scheduler.load_state_dict(checkpoint["warmup_scheduler_state_dict"])
# lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
global_step = checkpoint["global_step"]
start_epoch = int(checkpoint["epoch_id"]) + 1
task_stop_controller = checkpoint["task_stop_controller"]
tbLogger = checkpoint["tb_logger"]
del checkpoint
model.to(device)
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
model = DDP(model, delay_allreduce=True)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
if default_gpu:
print("***** Running training *****")
print(" Num Iters: ", task_num_iters)
print(" Batch size: ", batch_size)
print(" Num steps: %d" % num_train_optimization_steps)
target_path = os.path.join(task_cfg[task]["dataroot"], "all_targets_json.json")
all_targets = json.load(open(target_path, "r"))
model = model.to(device)
print(next(model.parameters()).is_cuda)
kmeans = MiniBatchKMeans(n_clusters=num_labels,random_state = 0,batch_size = batch_size)
with torch.no_grad():
model.eval()
print("********* training *************")
for step, batch in enumerate(train_loader):
print("########## STEP @ {} ##########".format(step))
batch = tuple(t.to(device=device, non_blocking=True) if type(t) == torch.Tensor else t for t in batch)
input_ids, input_mask, segment_ids, image_feat, image_loc, image_mask, image_id = (batch)
print(input_ids.shape)
print(len(train_loader))
true_targets = []
for id in image_id:
true_targets.append(np.fromiter(all_targets[id].values(), dtype=np.double))
true_targets = torch.from_numpy(np.array(true_targets))
true_targets = true_targets.to(device)
model.double()
model = model.to(device)
pooled_output, discourse_prediction, vil_prediction, vil_prediction_gqa, vil_logit, vil_binary_prediction, vil_tri_prediction, vision_prediction, vision_logit, linguisic_prediction, linguisic_logit, _ \
= model(
True,
input_ids,
image_feat,
image_loc,
segment_ids,
input_mask,
image_mask,
)
kmeans.partial_fit(pooled_output.to('cpu'))
evaluate(model, kmeans, device, task_cfg, tokenizer, args, labels)
def evaluate(model,kmeans, device, task_cfg, tokenizer, args, labels):
model.eval()
task = "TASK0"
target_path = os.path.join(task_cfg[task]["test_dataroot"], "all_targets_json.json")
batch_size = 64
test_dataset = DiscourseRelationDataset(
labels,
task_cfg[task]["test_dataroot"],
tokenizer,
args.bert_model,
task_cfg[task]["max_seq_length"],
encoding="utf-8",
visual_target=0,
batch_size=64,
shuffle=False,
num_workers=2,
cache=5000,
drop_last=False,
cuda=False,
objective=0,
visualization=False,
)
all_targets = json.load(open(target_path, "r"))
test_sampler = RandomSampler(test_dataset)
test_loader = DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=batch_size,
num_workers=0,
pin_memory=True,
)
cluster_label_map = np.zeros((len(labels), len(labels)))
label_label_map = np.zeros((len(labels), len(labels)))
with torch.no_grad():
for batch in test_loader:
batch = tuple(t.to(device=device, non_blocking=True) if type(t) == torch.Tensor else t for t in batch)
input_ids, input_mask, segment_ids, image_feat, image_loc, image_mask, image_id = (batch)
true_targets = []
for id in image_id:
true_targets.append(np.fromiter(all_targets[id].values(), dtype=np.double))
true_targets = torch.from_numpy(np.array(true_targets))
true_targets = true_targets.to(device)
model.double()
model = model.to(device)
pooled_output, discourse_prediction, vil_prediction, vil_prediction_gqa, vil_logit, vil_binary_prediction, vil_tri_prediction, vision_prediction, vision_logit, linguisic_prediction, linguisic_logit, _ \
= model(
True,
input_ids,
image_feat,
image_loc,
segment_ids,
input_mask,
image_mask
)
preds = kmeans.predict(pooled_output.to('cpu'))
for clstr_indx, y_true in zip(preds, true_targets):
cluster_label_map[clstr_indx] += y_true.cpu().numpy()
for y_true in true_targets:
for l, _ in enumerate(labels):
if y_true[l] == 1:
                        # accumulate per-label co-occurrence counts (assumed intent; mirrors cluster_label_map above)
                        label_label_map[l] += y_true.cpu().numpy()
print("********** clstr labels is *******")
print(cluster_label_map)
print("********** label labels is *******")
print(label_label_map )
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
rstoolbox/tests/components/test_design.py
|
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Jaume Bonet <[email protected]>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
Bruno Correia <[email protected]>
"""
# Standard Libraries
import os
import copy
# External Libraries
import pandas as pd
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import pytest
# This Library
import rstoolbox.io as ri
import rstoolbox.components as rc
import rstoolbox.plot as rp
import rstoolbox.analysis as ra
import rstoolbox.utils as ru
from rstoolbox.tests.helper import baseline_test_dir, random_frequency_matrix
class TestDesign( object ):
"""
Test usage of the DesignSeries/DesignFrame components.
Checks: apply the attached functions of the objects. This includes
setters/getters, reference data management and other utilities.
In the reference data test, it includes a test for the SelectionContainer.
"""
def setup_method( self, method ):
self.dirpath = os.path.join(os.path.dirname(__file__), '..', 'data')
self.silent1 = os.path.join(self.dirpath, 'input_2seq.minisilent.gz')
self.silent2 = os.path.join(self.dirpath, 'input_sse.minsilent.gz')
self.silent3 = os.path.join(self.dirpath, 'input_ssebig.minisilent.gz')
self.silent4 = os.path.join(self.dirpath, 'input_3ssepred.minisilent.gz')
self.score1 = os.path.join(self.dirpath, 'remodel.sc.gz')
@pytest.fixture(autouse=True)
def setup( self, tmpdir ):
self.tmpdir = tmpdir.strpath
def test_getters( self ):
"""
Test usage of the getter functions.
"""
# Assert types. Rows are DesignSeries, columns are not
sc_des = {"labels": ["MOTIF", "CONTACT", "CONTEXT"], "sequence": "AB"}
df = ri.parse_rosetta_file(self.silent1, sc_des)
assert isinstance(df, rc.DesignFrame)
sr = df.iloc[0]
assert isinstance(sr, rc.DesignSeries)
assert not isinstance(df["description"], rc.DesignSeries)
assert isinstance(df["description"], pd.Series)
# Check working with sequence getters
# We check everything both for DesignSeries and DesignFrame
# DesignFrame returns Series, while DesignSeries returns the
# actual data.
assert sorted(df.get_available_sequences()) == ["A", "B"]
assert sorted(sr.get_available_sequences()) == ["A", "B"]
assert len(df.get_sequence("A")) == 6
assert len(sr.get_sequence("A")) == 157
assert df.get_sequence("B")[0] == sr.get_sequence("B")
# Check working with label getters
# We check everything both for DesignSeries and DesignFrame
# DesignFrame returns Series, while DesignSeries returns the
# actual data.
assert sorted(df.get_available_labels()) == sorted(sc_des["labels"])
assert sorted(sr.get_available_labels()) == sorted(sc_des["labels"])
with pytest.raises(KeyError):
sr.get_label("MOTIF")
assert isinstance(df.get_label("MOTIF", "A")[0], rc.Selection)
assert isinstance(sr.get_label("MOTIF", "A"), rc.Selection)
assert str(df.get_label("CONTEXT", "B")[0]) == ""
assert str(sr.get_label("CONTEXT", "A")) == "1-157"
assert str(sr.get_label("CONTEXT", "B")) != str(sr.get_label("CONTEXT", "A"))
# Check working with structure getters
# We check everything both for DesignSeries and DesignFrame
# DesignFrame returns Series, while DesignSeries returns the
# actual data.
sc_des = {"sequence": "C", "structure": "C"}
df = ri.parse_rosetta_file(self.silent2, sc_des)
sr = df.iloc[0]
assert df.get_available_structures() == ["C"]
assert sr.get_available_structures() == ["C"]
with pytest.raises(KeyError):
assert len(df.get_structure("B")) == 6
with pytest.raises(KeyError):
assert len(sr.get_structure("B")) == 157
assert df.get_structure("C")[0] == sr.get_structure("C")
# Check working with structure prediction getters
# We check everything both for DesignSeries and DesignFrame
# DesignFrame returns Series, while DesignSeries returns the
# actual data.
assert df.get_available_structure_predictions() == []
with pytest.raises(KeyError):
assert len(df.get_structure_prediction("C")) == 6
sc_des = {'sequence': 'A', 'structure': 'A', 'psipred': 'A', 'dihedrals': 'A'}
df = ri.parse_rosetta_file(self.silent4, sc_des)
sr = df.iloc[0]
assert df.get_available_structure_predictions() == ['A']
assert df.get_structure_prediction('A')[0] == sr.get_structure_prediction('A')
assert len(df.get_structure_prediction('A')[0]) == 88
assert isinstance(df.get_dihedrals("A"), pd.DataFrame)
assert isinstance(sr.get_dihedrals("A"), list)
for e in sr.get_dihedrals("A"):
assert isinstance(e, np.ndarray)
assert np.array_equal(df.get_dihedrals("A").iloc[0][0], sr.get_dihedrals("A")[0])
# these are the ranges of the rosetta angles.
assert sr.get_phi("A").max() <= 180
assert sr.get_phi("A").min() >= -180
assert sr.get_psi("A").max() <= 180
assert sr.get_psi("A").min() >= -180
def test_incomplete_read( self ):
"""
Test that incomplete score files without the proper header change (such as
is the case with non-successful remodel runs) are read properly
"""
df = ri.parse_rosetta_file(self.score1)
assert (df[df['description'] == 'sketch4_0001']['dslf_fa13'].isna()).all()
assert df[~df['dslf_fa13'].isna()].shape[0] == 6
assert df[df['dslf_fa13'].isna()].shape[0] == 2
assert df.shape[0] == 8
def test_reference( self ):
"""
Test reference data usage.
"""
# Without sequence/structure data, there are no references.
df = ri.parse_rosetta_file(self.silent1)
sr = df.iloc[0]
refseq = "AYSTREILLALCIRDSRVHGNGTLHPVLELAARETPLRLSPEDTVVLRYHVLLEEIIERN" \
"SETFTETWNRFITHTEHVDLDFNSVFLEIFHRGDPSLGRALAWMAWCMHACRTLCCNQST" \
"PYYVVDLSVRGMLEASEGLDGWIHQQGGWSTLIEDNI"
with pytest.raises(KeyError):
df.add_reference_sequence("A", refseq)
with pytest.raises(KeyError):
sr.add_reference_sequence("A", refseq)
with pytest.raises(KeyError):
df.add_reference_shift("A", 2)
# Get label and sequence/structure data to play with integer shift.
sc_des = {"labels": ["MOTIF", "CONTACT", "CONTEXT"], "sequence": "A"}
_a = "9-26,28-29,31-32,35,37-40,67-68,70-71,89,91-116"
_b = "AYSTREILLALCIRDSRVH"
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence("A", refseq)
sr = df.iloc[0]
assert df.get_reference_sequence("A") == sr.get_reference_sequence("A")
# Shift tests
assert str(df.get_label("CONTACT", "A")[0]) == "1-19"
ctcopy = copy.deepcopy(df.get_label("CONTACT", "A")[0])
assert str(df.get_label("CONTACT", "B")[0]) == _a
assert df.get_reference_sequence("A", df.get_label("CONTACT", "A")[0]) == _b
df.add_reference_shift("A", 5, shift_labels=True)
# Expected behaviour: all DesignSeries from a DesignFrame share reference data
# and SelectionContainer
assert df.get_reference_shift("A") == 5
assert df.get_reference_shift("A") == sr.get_reference_shift("A")
assert str(df.get_label("CONTACT", "A")[0]) == "5A-23A"
assert str(df.get_label("CONTACT", "B")[0]) == _a
assert str(df.get_label("CONTACT", "A")[0]) == str(sr.get_label("CONTACT", "A"))
assert df.get_reference_sequence("A", df.get_label("CONTACT", "A")[0]) == _b
assert str(ctcopy) == "1-19"
assert df.get_reference_sequence("A", ctcopy) == _b
df.delete_reference("A", shift_labels=True)
assert str(df.get_label("CONTACT", "A")[0]) == "1-19"
assert str(df.get_label("CONTACT", "B")[0]) == _a
assert df.get_reference_shift("A") == 1
with pytest.raises(KeyError):
df.get_reference_sequence("A")
# Let's work with an array-type shift
ashift = list(range(1, len(refseq) + 1))
ashift[30:] = list(np.array(ashift[30:]) + 5)
with pytest.raises(ValueError):
ashift.index(32)
df = ri.parse_rosetta_file(self.silent1, sc_des)
_c = "LHPVLELAARETPLRLSPEDTVVLRYHVLLEEI"
df.add_reference_sequence("A", refseq)
sr = df.iloc[1]
assert str(sr.get_label("CONTACT", "A")) == "24-56"
assert sr.get_reference_sequence("A", sr.get_label("CONTACT", "A")) == _c
df.add_reference_shift("A", ashift, shift_labels=True)
assert str(sr.get_label("CONTACT", "A")) == "24A-30A,36A-61A"
assert sr.get_reference_sequence("A", sr.get_label("CONTACT", "A")) == _c
df.delete_reference("A", shift_labels=True)
assert str(sr.get_label("CONTACT", "A")) == "24-56"
assert df.get_reference_shift("A") == 1
with pytest.raises(KeyError):
df.get_reference_sequence("A")
def test_labels(self):
sc_des = {"scores": ["score"], "labels": ["MOTIF", "CONTACT", "CONTEXT"],
"sequence": "AB"}
df = ri.parse_rosetta_file(self.silent1, sc_des)
df = ra.selector_percentage(df, "A", "10-25", "test")
df = ra.selector_percentage(df, "B", "12-20", "test")
assert set(df.columns) == set(['score', 'lbl_MOTIF', 'lbl_CONTACT',
'lbl_CONTEXT', 'sequence_A', 'sequence_B',
'test_A_perc', 'test_B_perc'])
assert len(df['test_A_perc'].unique()) == 1
assert len(df['test_B_perc'].unique()) == 1
assert df['test_A_perc'].values[0] == pytest.approx(0.1019, rel=1e-3)
assert df['test_B_perc'].values[0] == pytest.approx(0.07758, rel=1e-3)
df = ra.label_percentage(df, "A", "CONTEXT")
df = ra.label_percentage(df, "A", "CONTACT")
df = ra.label_percentage(df, "A", "MOTIF")
df = ra.label_percentage(df, "B", "CONTACT")
df = ra.label_percentage(df, "B", "MOTIF")
df = ra.label_percentage(df, "B", "CONTEXT")
assert len(df['CONTEXT_A_perc'].unique()) == 1
assert df['CONTEXT_A_perc'].values[0] == 1
assert len(df['MOTIF_A_perc'].unique()) == 1
assert df['MOTIF_A_perc'].values[0] == 0
assert len(df['CONTACT_A_perc'].unique()) > 1
assert df['CONTACT_A_perc'].mean() == pytest.approx(0.0552, rel=1e-3)
assert len(df['CONTEXT_B_perc'].unique()) == 1
assert df['CONTEXT_B_perc'].values[0] == 0
assert len(df['MOTIF_B_perc'].unique()) == 1
assert df['MOTIF_B_perc'].values[0] == pytest.approx(0.1896, rel=1e-3)
assert len(df['CONTACT_B_perc'].unique()) > 1
assert df['CONTACT_B_perc'].mean() == pytest.approx(0.4669, rel=1e-3)
def test_label_sequence(self):
sc_des = {'scores': ['score'], 'sequence': '*', 'labels': ['MOTIF', 'CONTACT']}
df = ri.parse_rosetta_file(self.silent1, sc_des)
df = ra.label_sequence(df, 'B', 'MOTIF')
assert df.iloc[0]['MOTIF_B_seq'] == 'DMLPERMIAAALRAIGEIFNAE'
assert df.iloc[5]['MOTIF_B_seq'] == 'DMQPEWAIAAALRAIGEIFNQW'
df1 = ra.label_sequence(df, 'B', 'CONTACT', complete=True)
df2 = ra.label_sequence(df, 'B', 'CONTACT')
assert df1.iloc[0]['CONTACT_B_seq'] == '-RAWRLAEIAMRKGWEEHE-EWWWAKGREMREMKEAWKIAYYWGLMAAYWIKQHREKERK'
assert df1.iloc[5]['CONTACT_B_seq'] == '-FAKEEMHKHEEKAY-EFL-EYLAKP-EEHLE-R-AK-LHEEAAKEIWKFMHEAMRRFE-'
assert df1.iloc[0]['CONTACT_B_seq'].replace('-', '') == df2.iloc[0]['CONTACT_B_seq']
assert df1.iloc[5]['CONTACT_B_seq'].replace('-', '') == df2.iloc[5]['CONTACT_B_seq']
def test_getseqs(self):
sc_des = {"sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
assert df.shape[0] == 6
        assert df.get_sequence_with('B', [(1, 'T')]).shape[0] == 3
def test_split_values(self):
# Start test
df = ri.parse_rosetta_file(self.silent1)
split1 = {'split': [('GRMSD2Target', 'grmsdTr'), ('GRMSD2Template', 'grmsdTp'),
('LRMSD2Target', 'lrmsdTp'), ('LRMSDH2Target', 'lrmsdh2'),
('LRMSDLH2Target', 'lrmsdlh2')],
'names': ['rmsd', 'rmsd_type']}
dfs1 = ru.split_values(df, split1)
split2 = {'split': [('GRMSD2Target', 'global', 'target'),
('GRMSD2Template', 'global', 'template'),
('LRMSD2Target', 'local', 'target'),
('LRMSDH2Target', 'local', 'helix2'),
('LRMSDLH2Target', 'local', 'lhelix2')],
'names': ['rmsd', 'rmsd_type', 'rmsd_target']}
dfs2 = ru.split_values(df, split2)
assert df.shape[0] == 6
assert dfs1.shape[0] == 6 * 5
assert dfs1.shape[0] == dfs2.shape[0]
assert dfs1.shape[1] == dfs2.shape[1] - 1
assert 'rmsd' in dfs1.columns
assert 'rmsd' in dfs2.columns
assert 'rmsd_type' in dfs1.columns
assert 'rmsd_type' in dfs2.columns
assert 'rmsd_target' not in dfs1.columns
assert 'rmsd_target' in dfs2.columns
def test_split_columns(self):
data = {'a': [[1, 2], [3, 4], [7, 8]],
'b': [[1, 2], [3, 4], [7, 8]],
'c': ['a', 'b', 'c']}
df = pd.DataFrame(data)
assert df.shape[0] == 3
df = ru.split_dataframe_rows(df, ['a', 'b'])
assert df.shape[0] == 6
def test_clean_rosetta_suffix(self):
# Start test
df = ri.parse_rosetta_file(self.silent1)
df2 = df.clean_rosetta_suffix()
assert len(df['description'].unique()) == df.shape[0]
assert len(df2['description'].unique()) == 1
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_global_preview.png')
def test_global_preview(self):
df = ri.parse_rosetta_file(self.silent1)
values = ["score", "hbond_sr_bb", "B_ni_rmsd", "hbond_bb_sc",
"cav_vol", "design_score", "packstat", "rmsd_drift"]
fig = plt.figure(figsize=(25, 10))
rp.multiple_distributions(df, fig, (2, 4), values=values)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_global_preview_ref1.png')
def test_global_preview_withref1(self):
df = ri.parse_rosetta_file(self.silent1, {'sequence': 'A'})
values = ["score", "hbond_sr_bb", "B_ni_rmsd", "hbond_bb_sc",
"cav_vol", "design_score", "packstat", "rmsd_drift"]
slength = len(df.iloc[0].get_sequence('A'))
refdf = ru.load_refdata('scop2')
refdf = refdf[(refdf['length'] >= slength - 5) &
(refdf['length'] <= slength + 5)]
fig = plt.figure(figsize=(25, 10))
rp.multiple_distributions(df, fig, (2, 4), values=values, refdata=refdf)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_global_preview_ref2.png')
def test_global_preview_withref2(self):
df = ri.parse_rosetta_file(self.silent1, {'sequence': 'A'})
values = ["score", "hbond_sr_bb", "B_ni_rmsd", "hbond_bb_sc",
"cav_vol", "design_score", "packstat", "rmsd_drift"]
slength = len(df.iloc[0].get_sequence('A'))
refdf = ru.load_refdata('scop2')
refdf = refdf[(refdf['length'] >= slength - 5) &
(refdf['length'] <= slength + 5)]
fig = plt.figure(figsize=(25, 10))
rp.multiple_distributions(df, fig, (2, 4), values=values, refdata=refdf,
ref_equivalences={'cavity': 'cav_vol',
'pack': 'packstat'},
violins=False)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_global_preview_ref3.png')
def test_global_preview_withref3(self):
slength = 100
df = ru.load_refdata('scop2')
df = df[(df['length'] >= slength - 5) &
(df['length'] <= slength + 5)]
refdf = ru.load_refdata('scop2', 50)
refdf = refdf[(refdf['length'] >= slength - 5) &
(refdf['length'] <= slength + 5)]
values = ["score", "hbond_sr_bb", "avdegree", "hbond_bb_sc",
"cavity", "CYDentropy", "pack", "radius"]
fig = plt.figure(figsize=(25, 10))
rp.multiple_distributions(df, fig, (2, 4), values=values, refdata=refdf)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_incontext.png')
def test_in_context_plot(self):
slength = 100
df = ru.load_refdata('scop2')
df = df[(df['length'] >= slength - 5) &
(df['length'] <= slength + 5)].head(10)
refdf = ru.load_refdata('scop2', 50)
refdf = refdf[(refdf['length'] >= slength - 5) &
(refdf['length'] <= slength + 5)]
values = ["score", "hbond_sr_bb", "avdegree", "hbond_bb_sc",
"cavity", "CYDentropy", "pack", "radius"]
fig = plt.figure(figsize=(25, 10))
rp.plot_in_context(df, fig, (2, 4), refdata=refdf, values=values,
point_ms=10, kde_color='red')
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_incontexts.png')
def test_in_contexts_plot(self):
df = ru.load_refdata('scop')
qr = pd.DataFrame([['2F4V', 'C'], ['3BFU', 'B'], ['2APJ', 'C'],
['2C37', 'V'], ['2I6E', 'H']], columns=['pdb', 'chain'])
qr = qr.merge(df, on=['pdb', 'chain'])
refs = []
for i, t in qr.iterrows():
refs.append(df[(df['length'] >= (t['length'] - 5)) &
(df['length'] <= (t['length'] + 5))])
fig = plt.figure(figsize=(20, 6))
rp.distribution_quality(df=qr, refdata=refs,
values=['score', 'pack', 'avdegree',
'cavity', 'psipred'],
ascending=[True, False, True, True, False],
names=['pdb', 'chain'],
fig=fig)
plt.tight_layout()
return fig
def test_get_homology(self):
# Values are difficult to assess here, as this will change from one
# download to the next.
# Seems that downloading might not be possible in Travis... (?)
data = ru.make_redundancy_table(precalculated=True, select=[30])
assert len(data.groupby('c30')) > 1
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_mutants_alignment.png')
def test_mutants(self):
# Static data
refseq = "GSISDIRKDAEVRMDKAVEAFKNKLDKFKAAVRKVFPTEERIDMRPEIWIAQELRRIGDE" \
"FNAYRDANDKAAALGKDKEINWFDISQSLWDVQKLTDAAIKKIEAALADMEAWLTQ"
columns = ["mutants_B", "mutant_count_B", "mutant_positions_B"]
mut_number = [97, 91, 88, 90, 92, 92]
mut_type = [
"G1T,S2R,I3P,S4E,D5E,I6A,K8E,D9R,E11W,V12R,R13L,M14A,D15E,K16I,V18M,E19R,A20K,F21G,"
"K22W,N23E,K24E,L25H,D26E,K27R,F28E,K29W,A30E,A31W,V32W,R33K,K34R,V35A,F36S,P37K,"
"T38G,E39R,R41E,I42R,R45L,I48R,W49M,Q52A,E53A,R56A,D59E,E60I,Y64E,R65W,D66Q,A67M,N68R"
",D69L,K70E,A71M,A72E,A73K,L74E,G75R,D77N,K78P,E79N,I80A,N81G,W82E,F83E,D84K,I85M,S86K,"
"Q87E,S88Q,L89K,W90K,D91E,V92A,Q93W,L95I,T96A,D97Y,A98Y,A99W,I100G,K101L,K102M,I103A,"
"E104A,A105Y,A106W,L107I,A108K,D109Q,M110H,E111R,A112E,W113K,L114E,T115R,Q116K",
"G1P,S2K,I3P,S4E,D5E,I6A,R7M,K8R,D9E,E11Y,V12K,R13L,M14I,D15K,A17Y,V18M,E19L,A20K,F21A,"
"K22Q,N23K,K24E,L25A,D26Q,K27E,F28E,K29W,A30E,A31R,V32M,K34R,V35T,F36D,P37G,E39K,R41E,"
"I42K,R45F,I48K,W49M,E53A,R56A,D59E,E60I,R65Y,D66W,N68F,D69L,A71L,A72Q,A73E,L74F,G75K,"
"D77Y,K78P,E79S,I80V,N81R,F83E,D84E,I85Q,S86E,Q87E,S88A,L89R,W90K,D91R,V92L,Q93K,K94I,"
"L95M,T96M,D97K,A98I,A99G,I100A,K101E,K102W,I103A,E104R,A105E,A106I,L107A,A108R,D109E,"
"E111K,A112E,W113R,L114I,T115K,Q116R",
"G1T,S2K,I3P,S4E,D5E,I6M,R7A,K8R,D9E,E11Y,V12K,D15L,V18L,E19K,A20Q,F21G,K22E,N23E,K24E,"
"L25M,D26K,K27R,F28M,K29Y,A30E,A31Q,V32M,R33K,V35G,F36V,P37D,T38S,E39K,R41E,I42R,R45E,"
"I48K,W49M,Q52I,E53A,R56A,D59E,E60L,Y64W,R65M,D66K,N68L,D69R,K70H,A71M,A72K,A73E,G75R,"
"D77L,K78G,E79T,I80S,N81G,W82P,F83K,D84E,I85E,S86E,Q87K,S88H,L89W,W90R,D91W,V92I,Q93F,"
"K94E,T96H,D97R,A98W,I100G,K101E,K102E,E104Q,A105R,L107A,A108E,D109I,M110Q,A112R,W113K,"
"L114A,T115R,Q116W",
"G1T,S2K,I3P,S4E,D5E,I6W,R7A,K8R,D9W,E11Y,V12K,R13E,M14H,D15L,A17M,V18A,A20K,F21H,K22R,"
"N23K,K24E,L25M,D26E,K27I,F28E,K29W,A30E,A31E,V32L,R33K,K34R,V35R,F36D,P37G,T38K,R41E,"
"I42K,R45W,I48R,W49M,Q52M,E53A,R56A,D59E,E60L,A63H,Y64H,R65M,D66Y,N68E,D69M,K70R,A72K,"
"A73E,L74E,G75K,D77K,K78P,I80A,N81K,W82T,F83E,D84E,I85A,S86R,Q87R,S88A,L89R,W90R,D91E,"
"V92I,Q93M,L95Y,T96H,D97H,A98E,I100G,K101R,K102L,A105E,L107M,A108R,D109R,M110L,E111M,"
"A112E,W113R,L114H,T115K,Q116K",
"G1K,S2K,I3W,S4E,D5E,I6M,R7M,K8R,D9E,V12R,R13Q,M14G,D15K,K16E,A17Y,V18A,E19Q,A20K,F21A,"
"K22W,N23K,K24E,L25A,D26L,K27L,F28E,K29W,A30K,A31W,V32M,V35R,F36P,P37V,R41M,I42K,R45A,"
"I48W,W49M,Q52A,E53A,R56A,D59E,E60H,A63I,R65W,D66Q,A67Q,N68K,D69L,K70E,A71H,A72E,A73K,"
"G75R,D77I,K78P,E79N,I80V,N81P,W82E,F83E,D84E,I85L,S86E,Q87K,S88G,L89K,W90E,D91E,V92L,"
"Q93K,K94R,L95I,T96E,D97E,A98E,I100A,K101R,K102M,I103A,A105K,A106Y,L107M,A108Q,D109E,"
"M110L,E111R,A112K,W113K,L114M,T115E,Q116S",
"G1P,S2R,I3P,S4E,D5E,I6M,R7A,K8R,D9F,E11K,V12E,R13E,D15H,A17H,V18E,A20K,F21A,K22Y,N23R"
",K24E,L25F,D26L,K27L,F28E,K29Y,A30E,A31L,V32A,R33I,K34R,V35K,F36N,R41P,I42K,R45Q,I48W"
",W49A,Q52A,E53A,R56A,D59E,E60I,A63Q,Y64W,R65M,D66Y,A67H,N68L,D69L,K70E,A71I,A72R,A73K"
",L74E,G75N,K76G,D77S,K78S,E79H,I80T,N81R,W82Y,F83E,D84E,I85R,S86E,Q87K,S88Y,L89R,W90K"
",D91L,V92A,Q93K,K94R,T96H,D97E,A98E,I100A,K102E,E104W,A105K,A106F,L107M,A108H,D109E,"
"M110A,E111M,A112R,W113R,L114F,T115E,Q116S"
]
mut_pos = [",".join([_[1:-1] for _ in m.split(",")]) for m in mut_type]
sc_des = {"labels": ["MOTIF", "CONTACT"], "sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence("B", refseq)
df = df.identify_mutants("B")
for col in columns:
assert col in df
sr = df.iloc[0]
assert df.get_reference_sequence("B") == sr.get_reference_sequence("B")
assert df.get_identified_mutants() == ["B", ]
dfshift = df.copy()
dfshift.add_reference_shift("B", 15)
dfr = ru.report(dfshift)
assert dfr.iloc[0].get_mutations("B") != df.iloc[0].get_mutations("B")
for i, row in df.iterrows():
# Check number of mutations
assert row.get_mutation_count("B") == mut_number[i]
# Check type of mutations
assert row.get_mutations("B") == mut_type[i]
# Check position of mutations
assert row.get_mutation_positions("B") == mut_pos[i]
# Make new variants
dfm2 = df.iloc[0].generate_mutant_variants('B', [(1, "TGAP"), (14, "MAPT")])
assert dfm2.shape[0] == 16
assert 0 in dfm2.get_mutation_count('B')
# Revert to WT
dfwt = df.iloc[0:2].generate_wt_reversions('B', [1, 14])
assert dfwt.shape[0] == 8
dfwt = rc.DesignFrame({"description": ["reference"], "sequence_B": [refseq]})
dfwt.add_reference_sequence('B', refseq)
dfwt = dfwt.generate_mutant_variants('B', [(1, "TGP"), (6, "ERG"), (14, "MAT")])
assert dfwt.shape[0] == 28
dfwt = dfwt.generate_wt_reversions('B').identify_mutants('B')
assert dfwt.shape[0] == 36
assert 0 in dfwt.get_mutation_count('B').values
assert refseq in dfwt.get_sequence('B').values
# Make mutants from Matrix
dfwt = rc.DesignFrame({"description": ["reference"], "sequence_B": [refseq]})
dfwt.add_reference_sequence('B', refseq)
matrix = random_frequency_matrix(len(df.get_reference_sequence('B')), 0)
key_res = [3, 5, 8, 12, 15, 19, 25, 27]
mutants = dfwt.generate_mutants_from_matrix('B', matrix, 5, key_res)
assert isinstance(mutants, list)
assert len(mutants) == 1
mutants = mutants[0].identify_mutants('B')
assert mutants.shape[0] == 5
assert mutants.pssm_score_B.mean() != 0
# write to resfiles
df.make_resfile("B", "NATAA", os.path.join(self.tmpdir, "mutanttest.resfile"))
for i, row in df.iterrows():
newfile = os.path.join(self.tmpdir, "mutanttest" + "_{:>04d}".format(i) + ".resfile")
assert row["resfile_B"] == newfile
assert os.path.isfile(newfile)
# write alignment
ri.write_mutant_alignments(df, "B", os.path.join(self.tmpdir, "mutanttest.clw"))
assert os.path.isfile(os.path.join(self.tmpdir, "mutanttest.clw"))
# plot mutant
fig = plt.figure(figsize=(30, 10))
ax = plt.subplot2grid((1, 1), (0, 0), fig=fig)
rp.plot_alignment(df, "B", ax, matrix="BLOSUM62")
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_summary.png')
def test_summary_plot(self):
# Start test
df = ri.parse_rosetta_file(self.silent1)
fig = plt.figure(figsize=(30, 30))
rp.multiple_distributions(df, fig, (3, 3), (0, 0),
['score', 'GRMSD2Target', 'GRMSD2Template',
'LRMSD2Target', 'LRMSDH2Target', 'LRMSDLH2Target',
'design_score', 'packstat', 'rmsd_drift'])
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_logo.png')
def test_logo_plot(self):
refseq = "GSISDIRKDAEVRMDKAVEAFKNKLDKFKAAVRKVFPTEERIDMRPEIWIAQELRRIGDE" \
"FNAYRDANDKAAALGKDKEINWFDISQSLWDVQKLTDAAIKKIEAALADMEAWLTQ"
sc_des = {"sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence("B", refseq)
font = FontProperties()
font.set_size(35)
font.set_weight('bold')
fig, axs = rp.logo_plot( df, "B", refseq=True, line_break=50, hight_prop=2 )
for ax, ax2 in axs:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontproperties(font)
if ax2 is None:
continue
for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):
label.set_fontproperties(font)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_logo_noref.png')
def test_logo_plot_noref(self):
sc_des = {"sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
font = FontProperties()
font.set_size(35)
font.set_weight('bold')
fig, axs = rp.logo_plot( df, "B", refseq=False, line_break=50, hight_prop=2 )
for ax, ax2 in axs:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontproperties(font)
if ax2 is None:
continue
for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):
label.set_fontproperties(font)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_logo_bits.png')
def test_logo_plot_bits(self):
refseq = "GSISDIRKDAEVRMDKAVEAFKNKLDKFKAAVRKVFPTEERIDMRPEIWIAQELRRIGDE" \
"FNAYRDANDKAAALGKDKEINWFDISQSLWDVQKLTDAAIKKIEAALADMEAWLTQ"
sc_des = {"sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence("B", refseq)
df = df.sequence_bits('B')
font = FontProperties()
font.set_size(35)
font.set_weight('bold')
fig, axs = rp.logo_plot( df, "B", refseq=True, line_break=50, hight_prop=2 )
for ax, ax2 in axs:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontproperties(font)
if ax2 is None:
continue
for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):
label.set_fontproperties(font)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_logo_bits_noref.png')
def test_logo_plot_bits_noref(self):
sc_des = {"sequence": "B"}
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
df = df.sequence_bits('B')
font = FontProperties()
font.set_size(35)
font.set_weight('bold')
fig, axs = rp.logo_plot( df, "B", refseq=False, line_break=50, hight_prop=2 )
for ax, ax2 in axs:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontproperties(font)
if ax2 is None:
continue
for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):
label.set_fontproperties(font)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_per_res_matrix_score.png')
def test_per_res_matrix_score(self):
sc_des = {"scores": ["score"], "sequence": "B"}
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence('B', df.iloc[0]['sequence_B'])
df.add_reference_shift('B', 10)
seles = [('15-25', 'red'), ('45B-60B', 'green')]
fig = plt.figure(figsize=(25, 10))
ax0 = plt.subplot2grid((2, 1), (0, 0))
rp.per_residue_matrix_score_plot(df.iloc[1], "B", ax0)
ax1 = plt.subplot2grid((2, 1), (1, 0))
rp.per_residue_matrix_score_plot(df.iloc[1], "B", ax1, selections=seles)
plt.tight_layout()
return fig
def test_sequence_distances( self ):
sc_des = {"sequence": "AB"}
df = ri.parse_rosetta_file(self.silent1, sc_des)
dif1 = df.sequence_distance('A')
assert (dif1.max() == 0).all()
dif2 = df.sequence_distance('B')
dif3 = df.sequence_distance('B', df)
assert dif2.equals(dif3)
assert dif2.max().max() == 81
def test_sequence_similarities(self):
refseq = "GSISDIRKDAEVRMDKAVEAFKNKLDKFKAAVRKVFPTEERIDMRPEIWIAQELRRIGDE" \
"FNAYRDANDKAAALGKDKEINWFDISQSLWDVQKLTDAAIKKIEAALADMEAWLTQ"
diff1 = "....+.R+.A....+.A+.....+.++.....++.....E..DM.PE..IA..LR.IG+." \
"FNA......+.....K+.......+.+...+..K+...........+........+"
diff2 = "000000100100000010000000000000000000000100110110011001101100" \
"11100000000000010000000000000000010000000000000000000000"
diff3 = "000000100110110110100000000000001100111100110110011101101100" \
"11110010011001010010010000000000011000101011010001100000"
sc_des = {"scores": ["score"], "sequence": "B"}
new_cols = ["blosum62_B_raw", "blosum62_B_perc", "blosum62_B_identity",
"blosum62_B_positive", "blosum62_B_negative", "blosum62_B_ali",
"blosum62_B_per_res"]
# Start test
df = ri.parse_rosetta_file(self.silent1, sc_des)
df.add_reference_sequence("B", refseq)
# global sequence similarity
dfss = ra.sequence_similarity( df, "B" )
assert len(dfss.columns) == len(df.columns) + 7
assert len(set(dfss.columns).difference(set(df.columns))) == len(new_cols)
assert df.shape[0] == dfss.shape[0]
assert dfss.blosum62_B_raw.mean() == 41.0
assert dfss.blosum62_B_perc.mean() == pytest.approx(0.0692, rel=1e-3)
assert dfss.blosum62_B_identity.mean() == pytest.approx(24.333, rel=1e-3)
assert dfss.blosum62_B_positive.mean() == pytest.approx(46.166, rel=1e-3)
assert dfss.blosum62_B_negative.mean() == pytest.approx(69.833, rel=1e-3)
assert dfss.blosum62_B_ali.values[0] == diff1
# local sequence similarity
dfps = ra.positional_sequence_similarity(df, "B")
assert dfps.shape == (len(refseq), 2)
assert list(dfps.index.values) == list(range(1, len(refseq) + 1))
assert dfps.identity_perc.mean() < dfps.positive_perc.mean()
assert dfps.identity_perc.mean() == pytest.approx(0.2097, rel=1e-3)
assert dfps.positive_perc.mean() == pytest.approx(0.3979, rel=1e-3)
# binary similarity
df01 = ra.binary_similarity(df, "B")
assert len(df01.columns) == len(df.columns) + 1
assert df01.identity_B_binary.values[0] == diff2
# binary overlap
assert "".join([str(_) for _ in ra.binary_overlap(df01, "B")]) == diff3
def test_structure_similarities(self):
sse_ref = "LEEEEEEELLLEEEEEEELLLLHHHHHHHHHHHHLLLLLLLLLLLEEEELLLEEEELL"
diff1 = "LEEEEEEELLEEEEEEEELLLLHHHHHHHHHHHHLLLLLLLLLLEEEEELLLEEEEEL"
sc_des = {"scores": ["score"], "structure": "C"}
# Start test
df = ri.parse_rosetta_file(self.silent3, sc_des)
df.add_reference_structure("C", sse_ref)
# secondary structure distribution
dfsse = ra.positional_structural_count(df, 'C')
assert set(dfsse.columns.values) == set(['H', 'E', 'L'])
assert dfsse.shape[0] == len(sse_ref)
assert dfsse.H.mean() == pytest.approx(0.2033, rel=1e-3)
assert dfsse.E.mean() == pytest.approx(0.4038, rel=1e-3)
assert dfsse.L.mean() == pytest.approx(0.3927, rel=1e-3)
# secondary structure match
dfsm = ra.positional_structural_identity(df, 'C')
assert set(dfsm.columns.values) == set(['identity_perc', 'sse', 'max_sse'])
assert dfsm.shape[0] == len(sse_ref)
assert "".join(list(dfsm.sse.values)) == sse_ref
assert "".join(list(dfsm.max_sse.values)) == diff1
assert dfsm.identity_perc.mean() == pytest.approx(0.8121, rel=1e-3)
# percentages
dfpc = ra.secondary_structure_percentage(df, 'C')
assert 'structure_C_H' in dfpc.columns
assert 'structure_C_E' in dfpc.columns
assert 'structure_C_L' in dfpc.columns
assert dfpc['structure_C_H'].max() == pytest.approx(0.2413, rel=1e-3)
assert dfpc['structure_C_E'].mean() == pytest.approx(0.4038, rel=1e-3)
assert dfpc['structure_C_L'].min() == pytest.approx(0.3275, rel=1e-3)
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_sse_profile.png')
def test_sse_profile_plot(self):
sse_ref = "LEEEEEEELLLEEEEEEELLLLHHHHHHHHHHHHLLLLLLLLLLLEEEELLLEEEELL"
sc_des = {"scores": ["score"], "structure": "C"}
# Start test
df = ri.parse_rosetta_file(self.silent3, sc_des)
df.add_reference_structure("C", sse_ref)
df1 = ra.positional_structural_count(df, 'C')
df2 = ra.positional_structural_identity(df, 'C')
fig = plt.figure(figsize=(35, 10))
ax00 = plt.subplot2grid((1, 1), (0, 0))
rp.positional_structural_similarity_plot(pd.concat([df1, df2], axis=1), ax00)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_ramachandran.png')
def test_ramachandran_plot(self):
# Start test
sa_des = {"scores": ["score"], "sequence": "*", "dihedrals": "*"}
df = ri.parse_rosetta_file(self.silent4, sa_des)
fig = plt.figure(figsize=(15, 10))
fig2 = plt.figure(figsize=(15, 10))
with pytest.raises(ValueError):
rp.plot_ramachandran(df, "A", fig2)
rp.plot_ramachandran(df.iloc[0], "A", fig)
plt.tight_layout()
return fig
@pytest.mark.mpl_image_compare(baseline_dir=baseline_test_dir(),
filename='plot_dssp_vs_psipred.png')
def test_plot_dssp_vs_psipred(self):
# Start test
sa_des = {"scores": ["score"], "psipred": "*", "structure": "*"}
df = ri.parse_rosetta_file(self.silent4, sa_des)
fig = plt.figure(figsize=(15, 10))
ax = plt.gca()
rp.plot_dssp_vs_psipred( df.iloc[0], "A", ax )
plt.tight_layout()
return fig
|
[] |
[] |
[
"DISPLAY"
] |
[]
|
["DISPLAY"]
|
python
| 1 | 0 | |
pkg/backend/filestate/gcpauth.go
|
package filestate
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"golang.org/x/oauth2/google"
"gocloud.dev/blob/gcsblob"
"cloud.google.com/go/storage"
"gocloud.dev/blob"
"gocloud.dev/gcp"
)
type GoogleCredentials struct {
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
ClientEmail string `json:"client_email"`
ClientID string `json:"client_id"`
}
func googleCredentials(ctx context.Context) (*google.Credentials, error) {
// GOOGLE_CREDENTIALS aren't part of the gcloud standard authorization variables
// but the GCP terraform provider uses this variable to allow users to authenticate
// with the contents of a credentials.json file instead of just a file path.
// https://www.terraform.io/docs/backends/types/gcs.html
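// Illustrative only: the variable is typically populated with the raw JSON of a
// service-account key (not a file path), e.g. GOOGLE_CREDENTIALS="$(cat key.json)".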
if creds := os.Getenv("GOOGLE_CREDENTIALS"); creds != "" {
// We try $GOOGLE_CREDENTIALS before gcp.DefaultCredentials
// so that users can override the default creds
credentials, err := google.CredentialsFromJSON(ctx, []byte(creds), storage.ScopeReadWrite)
if err != nil {
return nil, fmt.Errorf("unable to parse credentials from $GOOGLE_CREDENTIALS: %w", err)
}
return credentials, nil
}
// DefaultCredentials will attempt to load creds in the following order:
// 1. a file located at $GOOGLE_APPLICATION_CREDENTIALS
// 2. application_default_credentials.json file in ~/.config/gcloud or $APPDATA\gcloud
credentials, err := gcp.DefaultCredentials(ctx)
if err != nil {
return nil, fmt.Errorf("unable to find gcp credentials: %w", err)
}
return credentials, nil
}
func GoogleCredentialsMux(ctx context.Context) (*blob.URLMux, error) {
credentials, err := googleCredentials(ctx)
if err != nil {
return nil, errors.New("missing google credentials")
}
client, err := gcp.NewHTTPClient(gcp.DefaultTransport(), credentials.TokenSource)
if err != nil {
return nil, err
}
options := gcsblob.Options{}
account := GoogleCredentials{}
err = json.Unmarshal(credentials.JSON, &account)
if err == nil && account.ClientEmail != "" && account.PrivateKey != "" {
options.GoogleAccessID = account.ClientEmail
options.PrivateKey = []byte(account.PrivateKey)
}
blobmux := &blob.URLMux{}
blobmux.RegisterBucket(gcsblob.Scheme, &gcsblob.URLOpener{
Client: client,
Options: options,
})
return blobmux, nil
}
|
[
"\"GOOGLE_CREDENTIALS\""
] |
[] |
[
"GOOGLE_CREDENTIALS"
] |
[]
|
["GOOGLE_CREDENTIALS"]
|
go
| 1 | 0 | |
utils/dockerutils.go
|
package utils
import (
"fmt"
"os"
"runtime"
dockerclient "github.com/docker/docker/client"
)
var (
// DefaultHTTPHost Default HTTP Host
DefaultHTTPHost = "localhost"
// DefaultHTTPPort Default HTTP Port
DefaultHTTPPort = 2375
// DefaultUnixSocket Path for the unix socket.
DefaultUnixSocket = "/var/run/docker.sock"
)
// getDockerHost returns the docker socket based on Environment settings
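// Illustrative values (assumptions, not exhaustive): DOCKER_HOST=unix:///var/run/docker.sock
// for the local daemon, or DOCKER_HOST=tcp://127.0.0.1:2375 for a TCP endpoint.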
func getDockerHost() string {
dockerHost := os.Getenv("DOCKER_HOST")
if dockerHost == "" {
if runtime.GOOS == "windows" {
// If we do not have a host, default to TCP socket on Windows
dockerHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
} else {
// If we do not have a host, default to unix socket
dockerHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
}
}
return dockerHost
}
// GetDockerClient returns a new Docker Client based on the environment settings
func GetDockerClient() (*dockerclient.Client, error) {
return dockerclient.NewClient(getDockerHost(), "", nil, nil)
}
|
[
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
perola/main.go
|
package main
import (
"os"
"os/signal"
"syscall"
"time"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/sirupsen/logrus"
"github.com/streadway/amqp"
)
const (
queueName = "ms.tts"
createTtsTopicName = "create_tts"
ttsCreatedTopicName = "tts_created"
)
var (
amqpURL = os.Getenv("RABBITMQ_URL")
log = logrus.WithField("package", "main")
)
func init() {
logrus.SetFormatter(&logrus.TextFormatter{
FullTimestamp: true,
TimestampFormat: time.StampMilli,
})
logrus.SetLevel(logrus.TraceLevel) // sets log level
}
func check(err error) {
if err != nil {
log.Fatalln("failed:", err)
}
}
func main() {
var err error
conn, err := amqp.Dial(amqpURL)
check(err)
defer conn.Close()
go func() { log.Debugf("closing: %s", <-conn.NotifyClose(make(chan *amqp.Error))) }()
log.Debugln("got Connection, getting Channel")
channel, err := conn.Channel()
check(err)
defer channel.Close()
log.Debugf("declaring Queue %q", queueName)
queue, err := channel.QueueDeclare(
queueName, // name of the queue
true, // durable
false, // delete when unused
false, // exclusive
false, // noWait
nil, // arguments
)
check(err)
log.Debugf("binding Queue %q to amq.topic", queueName)
err = channel.QueueBind(queueName, createTtsTopicName, "amq.topic", false, nil)
check(err)
log.Debugln("Setting QoS")
err = channel.Qos(1, 0, true)
check(err)
log.Debugf("declared Queue (%q %d messages, %d consumers)", queue.Name, queue.Messages, queue.Consumers)
tag := uuid.NewString()
log.Debugf("starting Consume (tag:%q)", tag)
deliveries, err := channel.Consume(
queue.Name, // name
tag, // consumerTag,
false, // noAck
false, // exclusive
false, // noLocal
false, // noWait
nil, // arguments
)
check(err)
done := make(chan struct{})
go handle(deliveries, channel, done)
// wait for interrupt signal
stopChan := make(chan os.Signal, 1)
signal.Notify(stopChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
select {
case <-stopChan:
// will close the deliveries channel
err = channel.Cancel(tag, true)
check(err)
// wait for go handle(...)
case <-done:
break
}
log.Debugln("AMQP consumer shutdown.")
}
func handle(deliveries <-chan amqp.Delivery, channel *amqp.Channel, done chan<- struct{}) {
const (
writeWait = 10 * time.Second // Time allowed to write the data to the client.
pongWait = 60 * time.Second // Time allowed to read the next pong message from the client.
pingPeriod = (pongWait * 9) / 10 // Send pings to client with this period. Must be less than pongWait.
)
ws, _, err := dial()
if err != nil {
log.Println(err)
return
}
pingTicker := time.NewTicker(pingPeriod)
defer func() {
pingTicker.Stop()
_ = ws.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
_ = ws.Close()
log.Debugln("handle: deliveries channel closed")
done <- struct{}{}
}()
ws.SetReadLimit(512)
responses := make(chan ttsResponse)
defer close(responses)
go func(ws *websocket.Conn, ch chan<- ttsResponse) {
_ = ws.SetReadDeadline(time.Now().Add(pongWait))
ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
for {
var resp ttsResponse
if err := ws.ReadJSON(&resp); err != nil {
log.Debugln("NextReader:", err)
return
}
ch <- resp
}
}(ws, responses)
log.Debugln("Listening...")
for {
select {
case delivery := <-deliveries:
if delivery.Body == nil {
return
}
message := string(delivery.Body)
if message == "" {
log.Debugln("empty message. ignoring...")
_ = delivery.Ack(false)
break
}
log.Infoln("DELIVERY:", message)
resp := tts(ws, responses, message, "guilherme")
if !resp.Payload.Success {
log.Println("!Success:", resp.Payload.Reason)
return
}
_ = delivery.Ack(false)
err = channel.Publish("amq.topic", ttsCreatedTopicName, false, false, amqp.Publishing{
ContentType: "text/plain",
ContentEncoding: "utf-8",
DeliveryMode: 2,
Expiration: "60000",
Body: []byte("https://api.cybervox.ai" + resp.Payload.AudioURL),
})
if err != nil {
log.Errorln("handle > channel.Publish:", err)
}
case <-pingTicker.C:
_ = ws.SetWriteDeadline(time.Now().Add(writeWait))
//log.Debugln("Ping...")
if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
log.Debugln("Ping:", err)
return
}
}
}
}
|
[
"\"RABBITMQ_URL\""
] |
[] |
[
"RABBITMQ_URL"
] |
[]
|
["RABBITMQ_URL"]
|
go
| 1 | 0 | |
examples/02_placeholder.py
|
# -*- coding: utf-8 -*-
# @Author: yanqiang
# @Date: 2018-05-14 23:01:30
# @Last Modified by: yanqiang
# @Last Modified time: 2018-05-14 23:12:22
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
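# Note (illustrative): '0' shows all TensorFlow C++ logs, '1' hides INFO,
# '2' hides INFO and WARNING, '3' additionally hides ERROR messages.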
import tensorflow as tf
# Example 1: feed_dict with placeholder
# a is a placeholder for a vector of 3 elements, type tf.float32
a = tf.placeholder(tf.float32, shape=[3])
b = tf.constant([5, 5, 5], tf.float32)
# use the placeholder as you would a constant
c = a + b # short for tf.add(a,b)
writer = tf.summary.FileWriter('graphs/placeholders', tf.get_default_graph())
with tf.Session() as sess:
# compute the value of c given that the value of a is [1, 2, 3]
print(sess.run(c, {a: [1, 2, 3]}))
writer.close()
# Example 2: feed_dict with variables
a = tf.add(2, 5)
b = tf.multiply(a, 3)
with tf.Session() as sess:
print(sess.run(b)) # >> 21
# compute the value of b given that the value of a is 15
print(sess.run(b, feed_dict={a: 15}))
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
linchpin/provision/library/bkr_info.py
|
#!/usr/bin/env python
import os
import xml.etree.ElementTree as eT
from bkr.client import conf, BeakerCommand
from bkr.common.hub import HubProxy
from bkr.common.pyconfig import PyConfigParser
from json import dumps, loads
from time import sleep
from sys import stderr
BEAKER_CONF = \
(os.environ.get('BEAKER_CONF', '/etc/beaker/client.conf'))
WAIT_TIME = 60
class BeakerTargets(object):
def __init__(self, params={}, logger=None):
self.__dict__ = params.copy()
self.conf = PyConfigParser()
default_config = os.path.expanduser(BEAKER_CONF)
self.conf.load_from_file(default_config)
self.hub = HubProxy(logger=logger, conf=self.conf)
def _get_url(self, bkr_id):
"""
Constructs the Beaker URL for the job related to the provided Beaker
ID. That ID should be all numeric, unless the structure of Beaker
changes in the future. If that's the case, then the ID should be
appropriately URL encoded to be appended to the end of a URL properly.
"""
base = self.conf.get('HUB_URL', '')
if base == '':
raise Exception("Unable to construct URL")
if base[-1] != '/':
base += '/'
return base + 'jobs/' + bkr_id
def get_system_statuses(self):
"""
Checks on the status of a set of Beaker jobs (ids) and returns their
hostname once the jobs have reached their defined status.
"""
attempts = 0
pass_count = 0
all_count = len(self.ids)
while attempts < self.max_attempts:
job_results = self._check_jobs(self.ids)
pass_count = 0
for resource in job_results['resources']:
result = resource['result']
status = resource['status']
print >> stderr, "status: %s, result: %s" % (status, result)
if status not in ['Cancelled', 'Aborted']:
if result == 'Pass' or (result == 'Warn' and self.skip_no_system):
pass_count += 1
elif result in ['Fail', 'Warn', 'Panic', 'Completed']:
raise Exception("System failed with state '{0}'"\
.format(result))
elif status == 'Aborted':
if result == 'Warn' and self.skip_no_system:
pass_count += 1
else:
raise Exception("System aborted")
elif status == 'Cancelled':
raise Exception("System canceled")
attempts += 1
if pass_count == all_count:
return job_results['resources']
sleep(WAIT_TIME)
raise Exception("{0} system(s) never completed in {1} polling attempts. {2}"\
.format(all_count - pass_count, attempts, dumps(job_results)))
def _check_jobs(self, ids):
"""
Get state of a job in Beaker
"""
jobs = ["J:" + _id for _id in ids]
results = {}
resources = []
bkrcmd = BeakerCommand('BeakerCommand')
bkrcmd.check_taskspec_args(jobs)
for task in jobs:
myxml = self.hub.taskactions.to_xml(task)
myxml = myxml.encode('utf8')
root = eT.fromstring(myxml)
# TODO: Using getiterator() since it's backward compatible
# with Python 2.6
# This is deprecated in 2.7 and we should be using iter()
for job in root.getiterator('job'):
results.update({'job_id': job.get('id'),
'results': job.get('result')})
for recipe in root.getiterator('recipe'):
resources.append({'family': recipe.get('family'),
'distro': recipe.get('distro'),
'arch': recipe.get('arch'),
'variant': recipe.get('variant'),
'system': recipe.get('system'),
'status': recipe.get('status'),
'result': recipe.get('result'),
'id': recipe.get('job_id')})
results.update({'resources': resources})
return results
def main():
mod = AnsibleModule(argument_spec={
'ids': {'type': 'list'},
'skip_no_system': {'type': 'bool', 'default': False},
'max_attempts': {'type': 'int', 'default': 60}
})
beaker = BeakerTargets(mod.params)
try:
results = beaker.get_system_statuses()
mod.exit_json(hosts=results, changed=True, success=True)
except Exception as ex:
mod.fail_json(msg=str(ex))
# import module snippets
from ansible.module_utils.basic import *
main()
|
[] |
[] |
[
"BEAKER_CONF"
] |
[]
|
["BEAKER_CONF"]
|
python
| 1 | 0 | |
actions/actions_test.go
|
/*
@Time 2019-09-04 16:21
@Author ZH
*/
package actions
import (
"os"
"path/filepath"
"testing"
)
var testConfigFile = filepath.Join(os.Getenv("GOPATH"), "src/github.com/securekey/fabric-examples/fabric-cli", "test/fixtures/config/config_test_local.yaml")
func TestNewAction(t *testing.T) {
action, err := New(testConfigFile, AutoDetectSelectionProvider)
if err != nil {
t.Fatal(err)
}
_ = action
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
src/net/http/serve_test.go
|
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// End-to-end serving tests
package http_test
import (
"bufio"
"bytes"
"crypto/tls"
"errors"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
. "net/http"
"net/http/httptest"
"net/http/httputil"
"net/http/internal"
"net/url"
"os"
"os/exec"
"reflect"
"runtime"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
)
type dummyAddr string
type oneConnListener struct {
conn net.Conn
}
func (l *oneConnListener) Accept() (c net.Conn, err error) {
c = l.conn
if c == nil {
err = io.EOF
return
}
err = nil
l.conn = nil
return
}
func (l *oneConnListener) Close() error {
return nil
}
func (l *oneConnListener) Addr() net.Addr {
return dummyAddr("test-address")
}
func (a dummyAddr) Network() string {
return string(a)
}
func (a dummyAddr) String() string {
return string(a)
}
type noopConn struct{}
func (noopConn) LocalAddr() net.Addr { return dummyAddr("local-addr") }
func (noopConn) RemoteAddr() net.Addr { return dummyAddr("remote-addr") }
func (noopConn) SetDeadline(t time.Time) error { return nil }
func (noopConn) SetReadDeadline(t time.Time) error { return nil }
func (noopConn) SetWriteDeadline(t time.Time) error { return nil }
type rwTestConn struct {
io.Reader
io.Writer
noopConn
closeFunc func() error // called if non-nil
closec chan bool // else, if non-nil, send value to it on close
}
func (c *rwTestConn) Close() error {
if c.closeFunc != nil {
return c.closeFunc()
}
select {
case c.closec <- true:
default:
}
return nil
}
type testConn struct {
readMu sync.Mutex // for TestHandlerBodyClose
readBuf bytes.Buffer
writeBuf bytes.Buffer
closec chan bool // if non-nil, send value to it on close
noopConn
}
func (c *testConn) Read(b []byte) (int, error) {
c.readMu.Lock()
defer c.readMu.Unlock()
return c.readBuf.Read(b)
}
func (c *testConn) Write(b []byte) (int, error) {
return c.writeBuf.Write(b)
}
func (c *testConn) Close() error {
select {
case c.closec <- true:
default:
}
return nil
}
// reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters,
// ending in \r\n\r\n
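// For example (illustrative): reqBytes("GET / HTTP/1.1\nHost: test") returns
// "GET / HTTP/1.1\r\nHost: test\r\n\r\n".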
func reqBytes(req string) []byte {
return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n")
}
type handlerTest struct {
handler Handler
}
func newHandlerTest(h Handler) handlerTest {
return handlerTest{h}
}
func (ht handlerTest) rawResponse(req string) string {
reqb := reqBytes(req)
var output bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(reqb),
Writer: &output,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
go Serve(ln, ht.handler)
<-conn.closec
return output.String()
}
func TestConsumingBodyOnNextConn(t *testing.T) {
defer afterTest(t)
conn := new(testConn)
for i := 0; i < 2; i++ {
conn.readBuf.Write([]byte(
"POST / HTTP/1.1\r\n" +
"Host: test\r\n" +
"Content-Length: 11\r\n" +
"\r\n" +
"foo=1&bar=1"))
}
reqNum := 0
ch := make(chan *Request)
servech := make(chan error)
listener := &oneConnListener{conn}
handler := func(res ResponseWriter, req *Request) {
reqNum++
ch <- req
}
go func() {
servech <- Serve(listener, HandlerFunc(handler))
}()
var req *Request
req = <-ch
if req == nil {
t.Fatal("Got nil first request.")
}
if req.Method != "POST" {
t.Errorf("For request #1's method, got %q; expected %q",
req.Method, "POST")
}
req = <-ch
if req == nil {
t.Fatal("Got nil first request.")
}
if req.Method != "POST" {
t.Errorf("For request #2's method, got %q; expected %q",
req.Method, "POST")
}
if serveerr := <-servech; serveerr != io.EOF {
t.Errorf("Serve returned %q; expected EOF", serveerr)
}
}
type stringHandler string
func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) {
w.Header().Set("Result", string(s))
}
var handlers = []struct {
pattern string
msg string
}{
{"/", "Default"},
{"/someDir/", "someDir"},
{"/#/", "hash"},
{"someHost.com/someDir/", "someHost.com/someDir"},
}
var vtests = []struct {
url string
expected string
}{
{"http://localhost/someDir/apage", "someDir"},
{"http://localhost/%23/apage", "hash"},
{"http://localhost/otherDir/apage", "Default"},
{"http://someHost.com/someDir/apage", "someHost.com/someDir"},
{"http://otherHost.com/someDir/apage", "someDir"},
{"http://otherHost.com/aDir/apage", "Default"},
// redirections for trees
{"http://localhost/someDir", "/someDir/"},
{"http://localhost/%23", "/%23/"},
{"http://someHost.com/someDir", "/someDir/"},
}
func TestHostHandlers(t *testing.T) {
defer afterTest(t)
mux := NewServeMux()
for _, h := range handlers {
mux.Handle(h.pattern, stringHandler(h.msg))
}
ts := httptest.NewServer(mux)
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
cc := httputil.NewClientConn(conn, nil)
for _, vt := range vtests {
var r *Response
var req Request
if req.URL, err = url.Parse(vt.url); err != nil {
t.Errorf("cannot parse url: %v", err)
continue
}
if err := cc.Write(&req); err != nil {
t.Errorf("writing request: %v", err)
continue
}
r, err := cc.Read(&req)
if err != nil {
t.Errorf("reading response: %v", err)
continue
}
switch r.StatusCode {
case StatusOK:
s := r.Header.Get("Result")
if s != vt.expected {
t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
}
case StatusMovedPermanently:
s := r.Header.Get("Location")
if s != vt.expected {
t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected)
}
default:
t.Errorf("Get(%q) unhandled status code %d", vt.url, r.StatusCode)
}
}
}
var serveMuxRegister = []struct {
pattern string
h Handler
}{
{"/dir/", serve(200)},
{"/search", serve(201)},
{"codesearch.google.com/search", serve(202)},
{"codesearch.google.com/", serve(203)},
{"example.com/", HandlerFunc(checkQueryStringHandler)},
}
// serve returns a handler that sends a response with the given code.
func serve(code int) HandlerFunc {
return func(w ResponseWriter, r *Request) {
w.WriteHeader(code)
}
}
// checkQueryStringHandler checks if r.URL.RawQuery has the same value
// as the URL excluding the scheme and the query string and sends 200
// response code if it is, 500 otherwise.
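// For example (illustrative): a request for example.com/test/?example.com/test/ has
// RawQuery "example.com/test/", so "http://"+RawQuery equals the rebuilt URL and 200 is sent.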
func checkQueryStringHandler(w ResponseWriter, r *Request) {
u := *r.URL
u.Scheme = "http"
u.Host = r.Host
u.RawQuery = ""
if "http://"+r.URL.RawQuery == u.String() {
w.WriteHeader(200)
} else {
w.WriteHeader(500)
}
}
var serveMuxTests = []struct {
method string
host string
path string
code int
pattern string
}{
{"GET", "google.com", "/", 404, ""},
{"GET", "google.com", "/dir", 301, "/dir/"},
{"GET", "google.com", "/dir/", 200, "/dir/"},
{"GET", "google.com", "/dir/file", 200, "/dir/"},
{"GET", "google.com", "/search", 201, "/search"},
{"GET", "google.com", "/search/", 404, ""},
{"GET", "google.com", "/search/foo", 404, ""},
{"GET", "codesearch.google.com", "/search", 202, "codesearch.google.com/search"},
{"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"},
{"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"},
{"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"},
{"GET", "images.google.com", "/search", 201, "/search"},
{"GET", "images.google.com", "/search/", 404, ""},
{"GET", "images.google.com", "/search/foo", 404, ""},
{"GET", "google.com", "/../search", 301, "/search"},
{"GET", "google.com", "/dir/..", 301, ""},
{"GET", "google.com", "/dir/..", 301, ""},
{"GET", "google.com", "/dir/./file", 301, "/dir/"},
// The /foo -> /foo/ redirect applies to CONNECT requests
// but the path canonicalization does not.
{"CONNECT", "google.com", "/dir", 301, "/dir/"},
{"CONNECT", "google.com", "/../search", 404, ""},
{"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
{"CONNECT", "google.com", "/dir/..", 200, "/dir/"},
{"CONNECT", "google.com", "/dir/./file", 200, "/dir/"},
}
func TestServeMuxHandler(t *testing.T) {
mux := NewServeMux()
for _, e := range serveMuxRegister {
mux.Handle(e.pattern, e.h)
}
for _, tt := range serveMuxTests {
r := &Request{
Method: tt.method,
Host: tt.host,
URL: &url.URL{
Path: tt.path,
},
}
h, pattern := mux.Handler(r)
rr := httptest.NewRecorder()
h.ServeHTTP(rr, r)
if pattern != tt.pattern || rr.Code != tt.code {
t.Errorf("%s %s %s = %d, %q, want %d, %q", tt.method, tt.host, tt.path, rr.Code, pattern, tt.code, tt.pattern)
}
}
}
var serveMuxTests2 = []struct {
method string
host string
url string
code int
redirOk bool
}{
{"GET", "google.com", "/", 404, false},
{"GET", "example.com", "/test/?example.com/test/", 200, false},
{"GET", "example.com", "test/?example.com/test/", 200, true},
}
// TestServeMuxHandlerRedirects tests that automatic redirects generated by
// mux.Handler() shouldn't clear the request's query string.
func TestServeMuxHandlerRedirects(t *testing.T) {
mux := NewServeMux()
for _, e := range serveMuxRegister {
mux.Handle(e.pattern, e.h)
}
for _, tt := range serveMuxTests2 {
tries := 1
turl := tt.url
for tries > 0 {
u, e := url.Parse(turl)
if e != nil {
t.Fatal(e)
}
r := &Request{
Method: tt.method,
Host: tt.host,
URL: u,
}
h, _ := mux.Handler(r)
rr := httptest.NewRecorder()
h.ServeHTTP(rr, r)
if rr.Code != 301 {
if rr.Code != tt.code {
t.Errorf("%s %s %s = %d, want %d", tt.method, tt.host, tt.url, rr.Code, tt.code)
}
break
}
if !tt.redirOk {
t.Errorf("%s %s %s, unexpected redirect", tt.method, tt.host, tt.url)
break
}
turl = rr.HeaderMap.Get("Location")
tries--
}
if tries < 0 {
t.Errorf("%s %s %s, too many redirects", tt.method, tt.host, tt.url)
}
}
}
// Tests for https://golang.org/issue/900
func TestMuxRedirectLeadingSlashes(t *testing.T) {
paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
for _, path := range paths {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n")))
if err != nil {
t.Errorf("%s", err)
}
mux := NewServeMux()
resp := httptest.NewRecorder()
mux.ServeHTTP(resp, req)
if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected {
t.Errorf("Expected Location header set to %q; got %q", expected, loc)
return
}
if code, expected := resp.Code, StatusMovedPermanently; code != expected {
t.Errorf("Expected response code of StatusMovedPermanently; got %d", code)
return
}
}
}
func TestServerTimeouts(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping test; see https://golang.org/issue/7237")
}
setParallel(t)
defer afterTest(t)
reqNum := 0
ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
reqNum++
fmt.Fprintf(res, "req=%d", reqNum)
}))
ts.Config.ReadTimeout = 250 * time.Millisecond
ts.Config.WriteTimeout = 250 * time.Millisecond
ts.Start()
defer ts.Close()
// Hit the HTTP server successfully.
tr := &Transport{DisableKeepAlives: true} // they interfere with this test
defer tr.CloseIdleConnections()
c := &Client{Transport: tr}
r, err := c.Get(ts.URL)
if err != nil {
t.Fatalf("http Get #1: %v", err)
}
got, _ := ioutil.ReadAll(r.Body)
expected := "req=1"
if string(got) != expected {
t.Errorf("Unexpected response for request #1; got %q; expected %q",
string(got), expected)
}
// Slow client that should timeout.
t1 := time.Now()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
buf := make([]byte, 1)
n, err := conn.Read(buf)
latency := time.Since(t1)
if n != 0 || err != io.EOF {
t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
}
if latency < 200*time.Millisecond /* fudge from 250 ms above */ {
t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond)
}
// Hit the HTTP server successfully again, verifying that the
// previous slow connection didn't run our handler. (that we
// get "req=2", not "req=3")
r, err = Get(ts.URL)
if err != nil {
t.Fatalf("http Get #2: %v", err)
}
got, _ = ioutil.ReadAll(r.Body)
expected = "req=2"
if string(got) != expected {
t.Errorf("Get #2 got %q, want %q", string(got), expected)
}
if !testing.Short() {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer conn.Close()
go io.Copy(ioutil.Discard, conn)
for i := 0; i < 5; i++ {
_, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
if err != nil {
t.Fatalf("on write %d: %v", i, err)
}
time.Sleep(ts.Config.ReadTimeout / 2)
}
}
}
// golang.org/issue/4741 -- setting only a write timeout that triggers
// shouldn't cause a handler to block forever on reads (next HTTP
// request) that will never happen.
func TestOnlyWriteTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
var conn net.Conn
var afterTimeoutErrc = make(chan error, 1)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, req *Request) {
buf := make([]byte, 512<<10)
_, err := w.Write(buf)
if err != nil {
t.Errorf("handler Write error: %v", err)
return
}
conn.SetWriteDeadline(time.Now().Add(-30 * time.Second))
_, err = w.Write(buf)
afterTimeoutErrc <- err
}))
ts.Listener = trackLastConnListener{ts.Listener, &conn}
ts.Start()
defer ts.Close()
tr := &Transport{DisableKeepAlives: false}
defer tr.CloseIdleConnections()
c := &Client{Transport: tr}
errc := make(chan error)
go func() {
res, err := c.Get(ts.URL)
if err != nil {
errc <- err
return
}
_, err = io.Copy(ioutil.Discard, res.Body)
errc <- err
}()
select {
case err := <-errc:
if err == nil {
t.Errorf("expected an error from Get request")
}
case <-time.After(5 * time.Second):
t.Fatal("timeout waiting for Get error")
}
if err := <-afterTimeoutErrc; err == nil {
t.Error("expected write error after timeout")
}
}
// trackLastConnListener tracks the last net.Conn that was accepted.
type trackLastConnListener struct {
net.Listener
last *net.Conn // destination
}
func (l trackLastConnListener) Accept() (c net.Conn, err error) {
c, err = l.Listener.Accept()
*l.last = c
return
}
// TestIdentityResponse verifies that a handler can unset the default chunked
// Transfer-Encoding by declaring an explicit Content-Length (or "identity").
func TestIdentityResponse(t *testing.T) {
defer afterTest(t)
handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
rw.Header().Set("Content-Length", "3")
rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
switch {
case req.FormValue("overwrite") == "1":
_, err := rw.Write([]byte("foo TOO LONG"))
if err != ErrContentLength {
t.Errorf("expected ErrContentLength; got %v", err)
}
case req.FormValue("underwrite") == "1":
rw.Header().Set("Content-Length", "500")
rw.Write([]byte("too short"))
default:
rw.Write([]byte("foo"))
}
})
ts := httptest.NewServer(handler)
defer ts.Close()
// Note: this relies on the assumption (which is true) that
// Get sends HTTP/1.1 or greater requests. Otherwise the
// server wouldn't have the choice to send back chunked
// responses.
for _, te := range []string{"", "identity"} {
url := ts.URL + "/?te=" + te
res, err := Get(url)
if err != nil {
t.Fatalf("error with Get of %s: %v", url, err)
}
if cl, expected := res.ContentLength, int64(3); cl != expected {
t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl)
}
if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected {
t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl)
}
if tl, expected := len(res.TransferEncoding), 0; tl != expected {
t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)",
url, expected, tl, res.TransferEncoding)
}
res.Body.Close()
}
// Verify that ErrContentLength is returned
url := ts.URL + "/?overwrite=1"
res, err := Get(url)
if err != nil {
t.Fatalf("error with Get of %s: %v", url, err)
}
res.Body.Close()
// Verify that the connection is closed when the declared Content-Length
// is larger than what the handler wrote.
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("error dialing: %v", err)
}
_, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n"))
if err != nil {
t.Fatalf("error writing: %v", err)
}
// The ReadAll will hang for a failing test, so use a Timer to
// fail explicitly.
goTimeout(t, 2*time.Second, func() {
got, _ := ioutil.ReadAll(conn)
expectedSuffix := "\r\n\r\ntoo short"
if !strings.HasSuffix(string(got), expectedSuffix) {
t.Errorf("Expected output to end with %q; got response body %q",
expectedSuffix, string(got))
}
})
}
func testTCPConnectionCloses(t *testing.T, req string, h Handler) {
defer afterTest(t)
s := httptest.NewServer(h)
defer s.Close()
conn, err := net.Dial("tcp", s.Listener.Addr().String())
if err != nil {
t.Fatal("dial error:", err)
}
defer conn.Close()
_, err = fmt.Fprint(conn, req)
if err != nil {
t.Fatal("print error:", err)
}
r := bufio.NewReader(conn)
res, err := ReadResponse(r, &Request{Method: "GET"})
if err != nil {
t.Fatal("ReadResponse error:", err)
}
didReadAll := make(chan bool, 1)
go func() {
select {
case <-time.After(5 * time.Second):
t.Error("body not closed after 5s")
return
case <-didReadAll:
}
}()
_, err = ioutil.ReadAll(r)
if err != nil {
t.Fatal("read error:", err)
}
didReadAll <- true
if !res.Close {
t.Errorf("Response.Close = false; want true")
}
}
// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive.
func TestServeHTTP10Close(t *testing.T) {
testTCPConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
}
// TestClientCanClose verifies that clients can also force a connection to close.
func TestClientCanClose(t *testing.T) {
testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nConnection: close\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
// Nothing.
}))
}
// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close,
// even for HTTP/1.1 requests.
func TestHandlersCanSetConnectionClose11(t *testing.T) {
testTCPConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Connection", "close")
}))
}
func TestHandlersCanSetConnectionClose10(t *testing.T) {
testTCPConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Connection", "close")
}))
}
func TestSetsRemoteAddr_h1(t *testing.T) { testSetsRemoteAddr(t, h1Mode) }
func TestSetsRemoteAddr_h2(t *testing.T) { testSetsRemoteAddr(t, h2Mode) }
func testSetsRemoteAddr(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "%s", r.RemoteAddr)
}))
defer cst.close()
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll error: %v", err)
}
ip := string(body)
if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") {
t.Fatalf("Expected local addr; got %q", ip)
}
}
type blockingRemoteAddrListener struct {
net.Listener
conns chan<- net.Conn
}
func (l *blockingRemoteAddrListener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return nil, err
}
brac := &blockingRemoteAddrConn{
Conn: c,
addrs: make(chan net.Addr, 1),
}
l.conns <- brac
return brac, nil
}
type blockingRemoteAddrConn struct {
net.Conn
addrs chan net.Addr
}
func (c *blockingRemoteAddrConn) RemoteAddr() net.Addr {
return <-c.addrs
}
// Issue 12943
func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "RA:%s", r.RemoteAddr)
}))
conns := make(chan net.Conn)
ts.Listener = &blockingRemoteAddrListener{
Listener: ts.Listener,
conns: conns,
}
ts.Start()
defer ts.Close()
tr := &Transport{DisableKeepAlives: true}
defer tr.CloseIdleConnections()
c := &Client{Transport: tr, Timeout: time.Second}
fetch := func(response chan string) {
resp, err := c.Get(ts.URL)
if err != nil {
t.Error(err)
response <- ""
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Error(err)
response <- ""
return
}
response <- string(body)
}
// Start a request. The server will block on getting conn.RemoteAddr.
response1c := make(chan string, 1)
go fetch(response1c)
// Wait for the server to accept it; grab the connection.
conn1 := <-conns
// Start another request and grab its connection
response2c := make(chan string, 1)
go fetch(response2c)
var conn2 net.Conn
select {
case conn2 = <-conns:
case <-time.After(time.Second):
t.Fatal("Second Accept didn't happen")
}
// Send a response on connection 2.
conn2.(*blockingRemoteAddrConn).addrs <- &net.TCPAddr{
IP: net.ParseIP("12.12.12.12"), Port: 12}
// ... and see it
response2 := <-response2c
if g, e := response2, "RA:12.12.12.12:12"; g != e {
t.Fatalf("response 2 addr = %q; want %q", g, e)
}
// Finish the first response.
conn1.(*blockingRemoteAddrConn).addrs <- &net.TCPAddr{
IP: net.ParseIP("21.21.21.21"), Port: 21}
// ... and see it
response1 := <-response1c
if g, e := response1, "RA:21.21.21.21:21"; g != e {
t.Fatalf("response 1 addr = %q; want %q", g, e)
}
}
func TestIdentityResponseHeaders(t *testing.T) {
defer afterTest(t)
log.SetOutput(ioutil.Discard) // is noisy otherwise
defer log.SetOutput(os.Stderr)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Transfer-Encoding", "identity")
w.(Flusher).Flush()
fmt.Fprintf(w, "I am an identity response.")
}))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
}
defer res.Body.Close()
if g, e := res.TransferEncoding, []string(nil); !reflect.DeepEqual(g, e) {
t.Errorf("expected TransferEncoding of %v; got %v", e, g)
}
if _, haveCL := res.Header["Content-Length"]; haveCL {
t.Errorf("Unexpected Content-Length")
}
if !res.Close {
t.Errorf("expected Connection: close; got %v", res.Close)
}
}
// TestHeadResponses verifies that all MIME type sniffing and Content-Length
// counting of GET requests also happens on HEAD requests.
func TestHeadResponses_h1(t *testing.T) { testHeadResponses(t, h1Mode) }
func TestHeadResponses_h2(t *testing.T) { testHeadResponses(t, h2Mode) }
func testHeadResponses(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
_, err := w.Write([]byte("<html>"))
if err != nil {
t.Errorf("ResponseWriter.Write: %v", err)
}
// Also exercise the ReaderFrom path
_, err = io.Copy(w, strings.NewReader("789a"))
if err != nil {
t.Errorf("Copy(ResponseWriter, ...): %v", err)
}
}))
defer cst.close()
res, err := cst.c.Head(cst.ts.URL)
if err != nil {
t.Error(err)
}
if len(res.TransferEncoding) > 0 {
t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
}
if ct := res.Header.Get("Content-Type"); ct != "text/html; charset=utf-8" {
t.Errorf("Content-Type: %q; want text/html; charset=utf-8", ct)
}
if v := res.ContentLength; v != 10 {
t.Errorf("Content-Length: %d; want 10", v)
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
if len(body) > 0 {
t.Errorf("got unexpected body %q", string(body))
}
}
func TestTLSHandshakeTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping test; see https://golang.org/issue/7237")
}
setParallel(t)
defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
errc := make(chanWriter, 10) // but only expecting 1
ts.Config.ReadTimeout = 250 * time.Millisecond
ts.Config.ErrorLog = log.New(errc, "", 0)
ts.StartTLS()
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer conn.Close()
goTimeout(t, 10*time.Second, func() {
var buf [1]byte
n, err := conn.Read(buf[:])
if err == nil || n != 0 {
t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
}
})
select {
case v := <-errc:
if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
t.Errorf("expected a TLS handshake timeout error; got %q", v)
}
case <-time.After(5 * time.Second):
t.Errorf("timeout waiting for logged error")
}
}
func TestTLSServer(t *testing.T) {
defer afterTest(t)
ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
if r.TLS != nil {
w.Header().Set("X-TLS-Set", "true")
if r.TLS.HandshakeComplete {
w.Header().Set("X-TLS-HandshakeComplete", "true")
}
}
}))
ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
defer ts.Close()
// Connect an idle TCP connection to this server before we run
// our real tests. This idle connection used to block forever
// in the TLS handshake, preventing future connections from
// being accepted. It may prevent future accidental blocking
// in newConn.
idleConn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer idleConn.Close()
goTimeout(t, 10*time.Second, func() {
if !strings.HasPrefix(ts.URL, "https://") {
t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
return
}
noVerifyTransport := &Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
client := &Client{Transport: noVerifyTransport}
res, err := client.Get(ts.URL)
if err != nil {
t.Error(err)
return
}
if res == nil {
t.Errorf("got nil Response")
return
}
defer res.Body.Close()
if res.Header.Get("X-TLS-Set") != "true" {
t.Errorf("expected X-TLS-Set response header")
return
}
if res.Header.Get("X-TLS-HandshakeComplete") != "true" {
t.Errorf("expected X-TLS-HandshakeComplete header")
}
})
}
func TestAutomaticHTTP2_Serve(t *testing.T) {
defer afterTest(t)
ln := newLocalListener(t)
ln.Close() // immediately (not a defer!)
var s Server
if err := s.Serve(ln); err == nil {
t.Fatal("expected an error")
}
on := s.TLSNextProto["h2"] != nil
if !on {
t.Errorf("http2 wasn't automatically enabled")
}
}
func TestAutomaticHTTP2_ListenAndServe(t *testing.T) {
cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey)
if err != nil {
t.Fatal(err)
}
testAutomaticHTTP2_ListenAndServe(t, &tls.Config{
Certificates: []tls.Certificate{cert},
})
}
func TestAutomaticHTTP2_ListenAndServe_GetCertificate(t *testing.T) {
cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey)
if err != nil {
t.Fatal(err)
}
testAutomaticHTTP2_ListenAndServe(t, &tls.Config{
GetCertificate: func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return &cert, nil
},
})
}
func testAutomaticHTTP2_ListenAndServe(t *testing.T, tlsConf *tls.Config) {
defer afterTest(t)
defer SetTestHookServerServe(nil)
var ok bool
var s *Server
const maxTries = 5
var ln net.Listener
Try:
for try := 0; try < maxTries; try++ {
ln = newLocalListener(t)
addr := ln.Addr().String()
ln.Close()
t.Logf("Got %v", addr)
lnc := make(chan net.Listener, 1)
SetTestHookServerServe(func(s *Server, ln net.Listener) {
lnc <- ln
})
s = &Server{
Addr: addr,
TLSConfig: tlsConf,
}
errc := make(chan error, 1)
go func() { errc <- s.ListenAndServeTLS("", "") }()
select {
case err := <-errc:
t.Logf("On try #%v: %v", try+1, err)
continue
case ln = <-lnc:
ok = true
t.Logf("Listening on %v", ln.Addr().String())
break Try
}
}
if !ok {
t.Fatalf("Failed to start up after %d tries", maxTries)
}
defer ln.Close()
c, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
InsecureSkipVerify: true,
NextProtos: []string{"h2", "http/1.1"},
})
if err != nil {
t.Fatal(err)
}
defer c.Close()
if got, want := c.ConnectionState().NegotiatedProtocol, "h2"; got != want {
t.Errorf("NegotiatedProtocol = %q; want %q", got, want)
}
if got, want := c.ConnectionState().NegotiatedProtocolIsMutual, true; got != want {
t.Errorf("NegotiatedProtocolIsMutual = %v; want %v", got, want)
}
}
type serverExpectTest struct {
contentLength int // of request body
chunked bool
expectation string // e.g. "100-continue"
readBody bool // whether handler should read the body (if false, sends StatusUnauthorized)
expectedResponse string // expected substring in first line of http response
}
func expectTest(contentLength int, expectation string, readBody bool, expectedResponse string) serverExpectTest {
return serverExpectTest{
contentLength: contentLength,
expectation: expectation,
readBody: readBody,
expectedResponse: expectedResponse,
}
}
var serverExpectTests = []serverExpectTest{
// Normal 100-continues, case-insensitive.
expectTest(100, "100-continue", true, "100 Continue"),
expectTest(100, "100-cOntInUE", true, "100 Continue"),
// No 100-continue.
expectTest(100, "", true, "200 OK"),
// 100-continue but requesting client to deny us,
// so it never reads the body.
expectTest(100, "100-continue", false, "401 Unauthorized"),
// Likewise without 100-continue:
expectTest(100, "", false, "401 Unauthorized"),
// Non-standard expectations are failures
expectTest(0, "a-pony", false, "417 Expectation Failed"),
// Expect-100 requested but no body (is apparently okay: Issue 7625)
expectTest(0, "100-continue", true, "200 OK"),
// Expect-100 requested but handler doesn't read the body
expectTest(0, "100-continue", false, "401 Unauthorized"),
// Expect-100 continue with no body, but a chunked body.
{
expectation: "100-continue",
readBody: true,
chunked: true,
expectedResponse: "100 Continue",
},
}
// Tests that the server responds to the "Expect" request header
// correctly.
// http2 test: TestServer_Response_Automatic100Continue
func TestServerExpect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
// Note using r.FormValue("readbody") because for POST
// requests that would read from r.Body, which we only
// conditionally want to do.
if strings.Contains(r.URL.RawQuery, "readbody=true") {
ioutil.ReadAll(r.Body)
w.Write([]byte("Hi"))
} else {
w.WriteHeader(StatusUnauthorized)
}
}))
defer ts.Close()
runTest := func(test serverExpectTest) {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer conn.Close()
// Only send the body immediately if we're acting like an HTTP client
// that doesn't send 100-continue expectations.
writeBody := test.contentLength != 0 && strings.ToLower(test.expectation) != "100-continue"
go func() {
contentLen := fmt.Sprintf("Content-Length: %d", test.contentLength)
if test.chunked {
contentLen = "Transfer-Encoding: chunked"
}
_, err := fmt.Fprintf(conn, "POST /?readbody=%v HTTP/1.1\r\n"+
"Connection: close\r\n"+
"%s\r\n"+
"Expect: %s\r\nHost: foo\r\n\r\n",
test.readBody, contentLen, test.expectation)
if err != nil {
t.Errorf("On test %#v, error writing request headers: %v", test, err)
return
}
if writeBody {
var targ io.WriteCloser = struct {
io.Writer
io.Closer
}{
conn,
ioutil.NopCloser(nil),
}
if test.chunked {
targ = httputil.NewChunkedWriter(conn)
}
body := strings.Repeat("A", test.contentLength)
_, err = fmt.Fprint(targ, body)
if err == nil {
err = targ.Close()
}
if err != nil {
if !test.readBody {
// Server likely already hung up on us.
// See larger comment below.
t.Logf("On test %#v, acceptable error writing request body: %v", test, err)
return
}
t.Errorf("On test %#v, error writing request body: %v", test, err)
}
}
}()
bufr := bufio.NewReader(conn)
line, err := bufr.ReadString('\n')
if err != nil {
if writeBody && !test.readBody {
// This is an acceptable failure due to a possible TCP race:
// We were still writing data and the server hung up on us. A TCP
// implementation may send a RST if our request body data was known
// to be lost, which may trigger our reads to fail.
// See RFC 1122 page 88.
t.Logf("On test %#v, acceptable error from ReadString: %v", test, err)
return
}
t.Fatalf("On test %#v, ReadString: %v", test, err)
}
if !strings.Contains(line, test.expectedResponse) {
t.Errorf("On test %#v, got first line = %q; want %q", test, line, test.expectedResponse)
}
}
for _, test := range serverExpectTests {
runTest(test)
}
}
// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
// should consume client request bodies that a handler didn't read.
func TestServerUnreadRequestBodyLittle(t *testing.T) {
defer afterTest(t)
conn := new(testConn)
body := strings.Repeat("x", 100<<10)
conn.readBuf.Write([]byte(fmt.Sprintf(
"POST / HTTP/1.1\r\n"+
"Host: test\r\n"+
"Content-Length: %d\r\n"+
"\r\n", len(body))))
conn.readBuf.Write([]byte(body))
done := make(chan bool)
readBufLen := func() int {
conn.readMu.Lock()
defer conn.readMu.Unlock()
return conn.readBuf.Len()
}
ls := &oneConnListener{conn}
go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
defer close(done)
if bufLen := readBufLen(); bufLen < len(body)/2 {
t.Errorf("on request, read buffer length is %d; expected about 100 KB", bufLen)
}
rw.WriteHeader(200)
rw.(Flusher).Flush()
if g, e := readBufLen(), 0; g != e {
t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e)
}
if c := rw.Header().Get("Connection"); c != "" {
t.Errorf(`Connection header = %q; want ""`, c)
}
}))
<-done
}
// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server
// should ignore client request bodies that a handler didn't read
// and close the connection.
func TestServerUnreadRequestBodyLarge(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Log("skipping in short mode")
}
conn := new(testConn)
body := strings.Repeat("x", 1<<20)
conn.readBuf.Write([]byte(fmt.Sprintf(
"POST / HTTP/1.1\r\n"+
"Host: test\r\n"+
"Content-Length: %d\r\n"+
"\r\n", len(body))))
conn.readBuf.Write([]byte(body))
conn.closec = make(chan bool, 1)
ls := &oneConnListener{conn}
go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
if conn.readBuf.Len() < len(body)/2 {
t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
}
rw.WriteHeader(200)
rw.(Flusher).Flush()
if conn.readBuf.Len() < len(body)/2 {
t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len())
}
}))
<-conn.closec
if res := conn.writeBuf.String(); !strings.Contains(res, "Connection: close") {
t.Errorf("Expected a Connection: close header; got response: %s", res)
}
}
type handlerBodyCloseTest struct {
bodySize int
bodyChunked bool
reqConnClose bool
wantEOFSearch bool // should Handler's Body.Close do Reads, looking for EOF?
wantNextReq bool // should it find the next request on the same conn?
}
func (t handlerBodyCloseTest) connectionHeader() string {
if t.reqConnClose {
return "Connection: close\r\n"
}
return ""
}
var handlerBodyCloseTests = [...]handlerBodyCloseTest{
// Small enough to slurp past to the next request +
// has Content-Length.
0: {
bodySize: 20 << 10,
bodyChunked: false,
reqConnClose: false,
wantEOFSearch: true,
wantNextReq: true,
},
// Small enough to slurp past to the next request +
// is chunked.
1: {
bodySize: 20 << 10,
bodyChunked: true,
reqConnClose: false,
wantEOFSearch: true,
wantNextReq: true,
},
// Small enough to slurp past to the next request +
// has Content-Length +
// declares Connection: close (so pointless to read more).
2: {
bodySize: 20 << 10,
bodyChunked: false,
reqConnClose: true,
wantEOFSearch: false,
wantNextReq: false,
},
// Small enough to slurp past to the next request +
// declares Connection: close,
// but chunked, so it might have trailers.
// TODO: maybe skip this search if no trailers were declared
// in the headers.
3: {
bodySize: 20 << 10,
bodyChunked: true,
reqConnClose: true,
wantEOFSearch: true,
wantNextReq: false,
},
// Big with Content-Length, so give up immediately if we know it's too big.
4: {
bodySize: 1 << 20,
bodyChunked: false, // has a Content-Length
reqConnClose: false,
wantEOFSearch: false,
wantNextReq: false,
},
// Big chunked, so read a bit before giving up.
5: {
bodySize: 1 << 20,
bodyChunked: true,
reqConnClose: false,
wantEOFSearch: true,
wantNextReq: false,
},
// Big with Connection: close, but chunked, so search for trailers.
// TODO: maybe skip this search if no trailers were declared
// in the headers.
6: {
bodySize: 1 << 20,
bodyChunked: true,
reqConnClose: true,
wantEOFSearch: true,
wantNextReq: false,
},
// Big with Connection: close, so don't do any reads on Close.
// With Content-Length.
7: {
bodySize: 1 << 20,
bodyChunked: false,
reqConnClose: true,
wantEOFSearch: false,
wantNextReq: false,
},
}
func TestHandlerBodyClose(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skip("skipping in -short mode")
}
for i, tt := range handlerBodyCloseTests {
testHandlerBodyClose(t, i, tt)
}
}
func testHandlerBodyClose(t *testing.T, i int, tt handlerBodyCloseTest) {
conn := new(testConn)
body := strings.Repeat("x", tt.bodySize)
if tt.bodyChunked {
conn.readBuf.WriteString("POST / HTTP/1.1\r\n" +
"Host: test\r\n" +
tt.connectionHeader() +
"Transfer-Encoding: chunked\r\n" +
"\r\n")
cw := internal.NewChunkedWriter(&conn.readBuf)
io.WriteString(cw, body)
cw.Close()
conn.readBuf.WriteString("\r\n")
} else {
conn.readBuf.Write([]byte(fmt.Sprintf(
"POST / HTTP/1.1\r\n"+
"Host: test\r\n"+
tt.connectionHeader()+
"Content-Length: %d\r\n"+
"\r\n", len(body))))
conn.readBuf.Write([]byte(body))
}
if !tt.reqConnClose {
conn.readBuf.WriteString("GET / HTTP/1.1\r\nHost: test\r\n\r\n")
}
conn.closec = make(chan bool, 1)
readBufLen := func() int {
conn.readMu.Lock()
defer conn.readMu.Unlock()
return conn.readBuf.Len()
}
ls := &oneConnListener{conn}
var numReqs int
var size0, size1 int
go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
numReqs++
if numReqs == 1 {
size0 = readBufLen()
req.Body.Close()
size1 = readBufLen()
}
}))
<-conn.closec
if numReqs < 1 || numReqs > 2 {
t.Fatalf("%d. bug in test. unexpected number of requests = %d", i, numReqs)
}
didSearch := size0 != size1
if didSearch != tt.wantEOFSearch {
t.Errorf("%d. did EOF search = %v; want %v (size went from %d to %d)", i, didSearch, !didSearch, size0, size1)
}
if tt.wantNextReq && numReqs != 2 {
t.Errorf("%d. numReq = %d; want 2", i, numReqs)
}
}
// testHandlerBodyConsumer represents a function injected into a test handler to
// vary work done on a request Body.
type testHandlerBodyConsumer struct {
name string
f func(io.ReadCloser)
}
var testHandlerBodyConsumers = []testHandlerBodyConsumer{
{"nil", func(io.ReadCloser) {}},
{"close", func(r io.ReadCloser) { r.Close() }},
{"discard", func(r io.ReadCloser) { io.Copy(ioutil.Discard, r) }},
}
func TestRequestBodyReadErrorClosesConnection(t *testing.T) {
defer afterTest(t)
for _, handler := range testHandlerBodyConsumers {
conn := new(testConn)
conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
"Host: test\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"hax\r\n" + // Invalid chunked encoding
"GET /secret HTTP/1.1\r\n" +
"Host: test\r\n" +
"\r\n")
conn.closec = make(chan bool, 1)
ls := &oneConnListener{conn}
var numReqs int
go Serve(ls, HandlerFunc(func(_ ResponseWriter, req *Request) {
numReqs++
if strings.Contains(req.URL.Path, "secret") {
t.Error("Request for /secret encountered, should not have happened.")
}
handler.f(req.Body)
}))
<-conn.closec
if numReqs != 1 {
t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
}
}
}
func TestInvalidTrailerClosesConnection(t *testing.T) {
defer afterTest(t)
for _, handler := range testHandlerBodyConsumers {
conn := new(testConn)
conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
"Host: test\r\n" +
"Trailer: hack\r\n" +
"Transfer-Encoding: chunked\r\n" +
"\r\n" +
"3\r\n" +
"hax\r\n" +
"0\r\n" +
"I'm not a valid trailer\r\n" +
"GET /secret HTTP/1.1\r\n" +
"Host: test\r\n" +
"\r\n")
conn.closec = make(chan bool, 1)
ln := &oneConnListener{conn}
var numReqs int
go Serve(ln, HandlerFunc(func(_ ResponseWriter, req *Request) {
numReqs++
if strings.Contains(req.URL.Path, "secret") {
t.Errorf("Handler %s, Request for /secret encountered, should not have happened.", handler.name)
}
handler.f(req.Body)
}))
<-conn.closec
if numReqs != 1 {
t.Errorf("Handler %s: got %d reqs; want 1", handler.name, numReqs)
}
}
}
// slowTestConn is a net.Conn that provides a means to simulate parts of a
// request being received piecemeal. Deadlines can be set and enforced in both
// Read and Write.
type slowTestConn struct {
// over multiple calls to Read, time.Durations are slept, strings are read.
script []interface{}
closec chan bool
mu sync.Mutex // guards rd/wd
rd, wd time.Time // read, write deadline
noopConn
}
func (c *slowTestConn) SetDeadline(t time.Time) error {
c.SetReadDeadline(t)
c.SetWriteDeadline(t)
return nil
}
func (c *slowTestConn) SetReadDeadline(t time.Time) error {
c.mu.Lock()
defer c.mu.Unlock()
c.rd = t
return nil
}
func (c *slowTestConn) SetWriteDeadline(t time.Time) error {
c.mu.Lock()
defer c.mu.Unlock()
c.wd = t
return nil
}
func (c *slowTestConn) Read(b []byte) (n int, err error) {
c.mu.Lock()
defer c.mu.Unlock()
restart:
if !c.rd.IsZero() && time.Now().After(c.rd) {
return 0, syscall.ETIMEDOUT
}
if len(c.script) == 0 {
return 0, io.EOF
}
switch cue := c.script[0].(type) {
case time.Duration:
if !c.rd.IsZero() {
// If the deadline falls in the middle of our sleep window, deduct
// part of the sleep, then return a timeout.
if remaining := c.rd.Sub(time.Now()); remaining < cue {
c.script[0] = cue - remaining
time.Sleep(remaining)
return 0, syscall.ETIMEDOUT
}
}
c.script = c.script[1:]
time.Sleep(cue)
goto restart
case string:
n = copy(b, cue)
// If cue is too big for the buffer, leave the end for the next Read.
if len(cue) > n {
c.script[0] = cue[n:]
} else {
c.script = c.script[1:]
}
default:
panic("unknown cue in slowTestConn script")
}
return
}
func (c *slowTestConn) Close() error {
select {
case c.closec <- true:
default:
}
return nil
}
func (c *slowTestConn) Write(b []byte) (int, error) {
if !c.wd.IsZero() && time.Now().After(c.wd) {
return 0, syscall.ETIMEDOUT
}
return len(b), nil
}
func TestRequestBodyTimeoutClosesConnection(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer afterTest(t)
for _, handler := range testHandlerBodyConsumers {
conn := &slowTestConn{
script: []interface{}{
"POST /public HTTP/1.1\r\n" +
"Host: test\r\n" +
"Content-Length: 10000\r\n" +
"\r\n",
"foo bar baz",
600 * time.Millisecond, // Request deadline should hit here
"GET /secret HTTP/1.1\r\n" +
"Host: test\r\n" +
"\r\n",
},
closec: make(chan bool, 1),
}
ls := &oneConnListener{conn}
var numReqs int
s := Server{
Handler: HandlerFunc(func(_ ResponseWriter, req *Request) {
numReqs++
if strings.Contains(req.URL.Path, "secret") {
t.Error("Request for /secret encountered, should not have happened.")
}
handler.f(req.Body)
}),
ReadTimeout: 400 * time.Millisecond,
}
go s.Serve(ls)
<-conn.closec
if numReqs != 1 {
t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
}
}
}
func TestTimeoutHandler_h1(t *testing.T) { testTimeoutHandler(t, h1Mode) }
func TestTimeoutHandler_h2(t *testing.T) { testTimeoutHandler(t, h2Mode) }
func testTimeoutHandler(t *testing.T, h2 bool) {
defer afterTest(t)
sendHi := make(chan bool, 1)
writeErrors := make(chan error, 1)
sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
<-sendHi
_, werr := w.Write([]byte("hi"))
writeErrors <- werr
})
timeout := make(chan time.Time, 1) // write to this to force timeouts
cst := newClientServerTest(t, h2, NewTestTimeoutHandler(sayHi, timeout))
defer cst.close()
// Succeed without timing out:
sendHi <- true
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Error(err)
}
if g, e := res.StatusCode, StatusOK; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
body, _ := ioutil.ReadAll(res.Body)
if g, e := string(body), "hi"; g != e {
t.Errorf("got body %q; expected %q", g, e)
}
if g := <-writeErrors; g != nil {
t.Errorf("got unexpected Write error on first request: %v", g)
}
// Times out:
timeout <- time.Time{}
res, err = cst.c.Get(cst.ts.URL)
if err != nil {
t.Error(err)
}
if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
body, _ = ioutil.ReadAll(res.Body)
if !strings.Contains(string(body), "<title>Timeout</title>") {
t.Errorf("expected timeout body; got %q", string(body))
}
// Now make the previously-timed out handler speak again,
// which verifies the panic is handled:
sendHi <- true
if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
t.Errorf("expected Write error of %v; got %v", e, g)
}
}
// See issues 8209 and 8414.
func TestTimeoutHandlerRace(t *testing.T) {
defer afterTest(t)
delayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
ms, _ := strconv.Atoi(r.URL.Path[1:])
if ms == 0 {
ms = 1
}
for i := 0; i < ms; i++ {
w.Write([]byte("hi"))
time.Sleep(time.Millisecond)
}
})
ts := httptest.NewServer(TimeoutHandler(delayHi, 20*time.Millisecond, ""))
defer ts.Close()
var wg sync.WaitGroup
gate := make(chan bool, 10)
n := 50
if testing.Short() {
n = 10
gate = make(chan bool, 3)
}
for i := 0; i < n; i++ {
gate <- true
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-gate }()
res, err := Get(fmt.Sprintf("%s/%d", ts.URL, rand.Intn(50)))
if err == nil {
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
}
}()
}
wg.Wait()
}
// See issues 8209 and 8414.
func TestTimeoutHandlerRaceHeader(t *testing.T) {
defer afterTest(t)
delay204 := HandlerFunc(func(w ResponseWriter, r *Request) {
w.WriteHeader(204)
})
ts := httptest.NewServer(TimeoutHandler(delay204, time.Nanosecond, ""))
defer ts.Close()
var wg sync.WaitGroup
gate := make(chan bool, 50)
n := 500
if testing.Short() {
n = 10
}
for i := 0; i < n; i++ {
gate <- true
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-gate }()
res, err := Get(ts.URL)
if err != nil {
t.Error(err)
return
}
defer res.Body.Close()
io.Copy(ioutil.Discard, res.Body)
}()
}
wg.Wait()
}
// Issue 9162
func TestTimeoutHandlerRaceHeaderTimeout(t *testing.T) {
defer afterTest(t)
sendHi := make(chan bool, 1)
writeErrors := make(chan error, 1)
sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Type", "text/plain")
<-sendHi
_, werr := w.Write([]byte("hi"))
writeErrors <- werr
})
timeout := make(chan time.Time, 1) // write to this to force timeouts
cst := newClientServerTest(t, h1Mode, NewTestTimeoutHandler(sayHi, timeout))
defer cst.close()
// Succeed without timing out:
sendHi <- true
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Error(err)
}
if g, e := res.StatusCode, StatusOK; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
body, _ := ioutil.ReadAll(res.Body)
if g, e := string(body), "hi"; g != e {
t.Errorf("got body %q; expected %q", g, e)
}
if g := <-writeErrors; g != nil {
t.Errorf("got unexpected Write error on first request: %v", g)
}
// Times out:
timeout <- time.Time{}
res, err = cst.c.Get(cst.ts.URL)
if err != nil {
t.Error(err)
}
if g, e := res.StatusCode, StatusServiceUnavailable; g != e {
t.Errorf("got res.StatusCode %d; expected %d", g, e)
}
body, _ = ioutil.ReadAll(res.Body)
if !strings.Contains(string(body), "<title>Timeout</title>") {
t.Errorf("expected timeout body; got %q", string(body))
}
// Now make the previously-timed out handler speak again,
// which verifies the panic is handled:
sendHi <- true
if g, e := <-writeErrors, ErrHandlerTimeout; g != e {
t.Errorf("expected Write error of %v; got %v", e, g)
}
}
// Issue 14568.
func TestTimeoutHandlerStartTimerWhenServing(t *testing.T) {
if testing.Short() {
t.Skip("skipping sleeping test in -short mode")
}
defer afterTest(t)
var handler HandlerFunc = func(w ResponseWriter, _ *Request) {
w.WriteHeader(StatusNoContent)
}
timeout := 300 * time.Millisecond
ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
defer ts.Close()
// The issue was caused by the timeout handler starting its timer when the
// handler was created, not when the request arrived. So wait for more than
// the timeout to ensure that's not the case.
time.Sleep(2 * timeout)
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != StatusNoContent {
t.Errorf("got res.StatusCode %d, want %v", res.StatusCode, StatusNoContent)
}
}
// Verifies we don't path.Clean() on the wrong parts in redirects.
func TestRedirectMunging(t *testing.T) {
req, _ := NewRequest("GET", "http://example.com/", nil)
resp := httptest.NewRecorder()
Redirect(resp, req, "/foo?next=http://bar.com/", 302)
if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e {
t.Errorf("Location header was %q; want %q", g, e)
}
resp = httptest.NewRecorder()
Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302)
if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e {
t.Errorf("Location header was %q; want %q", g, e)
}
}
func TestRedirectBadPath(t *testing.T) {
// This used to crash. It's not valid input (bad path), but it
// shouldn't crash.
rr := httptest.NewRecorder()
req := &Request{
Method: "GET",
URL: &url.URL{
Scheme: "http",
Path: "not-empty-but-no-leading-slash", // bogus
},
}
Redirect(rr, req, "", 304)
if rr.Code != 304 {
t.Errorf("Code = %d; want 304", rr.Code)
}
}
// Test different URL formats and schemes
func TestRedirectURLFormat(t *testing.T) {
req, _ := NewRequest("GET", "http://example.com/qux/", nil)
var tests = []struct {
in string
want string
}{
// normal http
{"http://foobar.com/baz", "http://foobar.com/baz"},
// normal https
{"https://foobar.com/baz", "https://foobar.com/baz"},
// custom scheme
{"test://foobar.com/baz", "test://foobar.com/baz"},
// schemeless
{"//foobar.com/baz", "//foobar.com/baz"},
// relative to the root
{"/foobar.com/baz", "/foobar.com/baz"},
// relative to the current path
{"foobar.com/baz", "/qux/foobar.com/baz"},
// relative to the current path (+ going upwards)
{"../quux/foobar.com/baz", "/quux/foobar.com/baz"},
// incorrect number of slashes
{"///foobar.com/baz", "/foobar.com/baz"},
}
for _, tt := range tests {
rec := httptest.NewRecorder()
Redirect(rec, req, tt.in, 302)
if got := rec.Header().Get("Location"); got != tt.want {
t.Errorf("Redirect(%q) generated Location header %q; want %q", tt.in, got, tt.want)
}
}
}
// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
// when there is no body (either because the method doesn't permit a body, or an
// explicit Content-Length of zero is present), then the transport can re-use the
// connection immediately. But when it re-uses the connection, it typically closes
// the previous request's body, which is not optimal for zero-length bodies,
// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
func TestZeroLengthPostAndResponse_h1(t *testing.T) {
testZeroLengthPostAndResponse(t, h1Mode)
}
func TestZeroLengthPostAndResponse_h2(t *testing.T) {
testZeroLengthPostAndResponse(t, h2Mode)
}
func testZeroLengthPostAndResponse(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, r *Request) {
all, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("handler ReadAll: %v", err)
}
if len(all) != 0 {
t.Errorf("handler got %d bytes; expected 0", len(all))
}
rw.Header().Set("Content-Length", "0")
}))
defer cst.close()
req, err := NewRequest("POST", cst.ts.URL, strings.NewReader(""))
if err != nil {
t.Fatal(err)
}
req.ContentLength = 0
var resp [5]*Response
for i := range resp {
resp[i], err = cst.c.Do(req)
if err != nil {
t.Fatalf("client post #%d: %v", i, err)
}
}
for i := range resp {
all, err := ioutil.ReadAll(resp[i].Body)
if err != nil {
t.Fatalf("req #%d: client ReadAll: %v", i, err)
}
if len(all) != 0 {
t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
}
}
}
func TestHandlerPanicNil_h1(t *testing.T) { testHandlerPanic(t, false, h1Mode, nil) }
func TestHandlerPanicNil_h2(t *testing.T) { testHandlerPanic(t, false, h2Mode, nil) }
func TestHandlerPanic_h1(t *testing.T) {
testHandlerPanic(t, false, h1Mode, "intentional death for testing")
}
func TestHandlerPanic_h2(t *testing.T) {
testHandlerPanic(t, false, h2Mode, "intentional death for testing")
}
func TestHandlerPanicWithHijack(t *testing.T) {
// Only testing HTTP/1, and our http2 server doesn't support hijacking.
testHandlerPanic(t, true, h1Mode, "intentional death for testing")
}
func testHandlerPanic(t *testing.T, withHijack, h2 bool, panicValue interface{}) {
defer afterTest(t)
// Unlike the other tests that set the log output to ioutil.Discard
// to quiet the output, this test uses a pipe. The pipe serves three
// purposes:
//
// 1) The log.Print from the http server (generated by the caught
// panic) will go to the pipe instead of stderr, making the
// output quiet.
//
// 2) We read from the pipe to verify that the handler
// actually caught the panic and logged something.
//
// 3) The blocking Read call prevents this TestHandlerPanic
// function from exiting before the HTTP server handler
// finishes crashing. If this test function exited too
// early (and its defer log.SetOutput(os.Stderr) ran),
// then the crash output could spill into the next test.
pr, pw := io.Pipe()
log.SetOutput(pw)
defer log.SetOutput(os.Stderr)
defer pw.Close()
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
if withHijack {
rwc, _, err := w.(Hijacker).Hijack()
if err != nil {
t.Logf("unexpected error: %v", err)
}
defer rwc.Close()
}
panic(panicValue)
}))
defer cst.close()
// Do a blocking read on the log output pipe so its logging
// doesn't bleed into the next test. But wait only 5 seconds
// for it.
done := make(chan bool, 1)
go func() {
buf := make([]byte, 4<<10)
_, err := pr.Read(buf)
pr.Close()
if err != nil && err != io.EOF {
t.Error(err)
}
done <- true
}()
_, err := cst.c.Get(cst.ts.URL)
if err == nil {
t.Logf("expected an error")
}
if panicValue == nil {
return
}
select {
case <-done:
return
case <-time.After(5 * time.Second):
t.Fatal("expected server handler to log an error")
}
}
func TestServerNoDate_h1(t *testing.T) { testServerNoHeader(t, h1Mode, "Date") }
func TestServerNoDate_h2(t *testing.T) { testServerNoHeader(t, h2Mode, "Date") }
func TestServerNoContentType_h1(t *testing.T) { testServerNoHeader(t, h1Mode, "Content-Type") }
func TestServerNoContentType_h2(t *testing.T) { testServerNoHeader(t, h2Mode, "Content-Type") }
func testServerNoHeader(t *testing.T, h2 bool, header string) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header()[header] = nil
io.WriteString(w, "<html>foo</html>") // non-empty
}))
defer cst.close()
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if got, ok := res.Header[header]; ok {
t.Fatalf("Expected no %s header; got %q", header, got)
}
}
func TestStripPrefix(t *testing.T) {
defer afterTest(t)
h := HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("X-Path", r.URL.Path)
})
ts := httptest.NewServer(StripPrefix("/foo", h))
defer ts.Close()
res, err := Get(ts.URL + "/foo/bar")
if err != nil {
t.Fatal(err)
}
if g, e := res.Header.Get("X-Path"), "/bar"; g != e {
t.Errorf("test 1: got %s, want %s", g, e)
}
res.Body.Close()
res, err = Get(ts.URL + "/bar")
if err != nil {
t.Fatal(err)
}
if g, e := res.StatusCode, 404; g != e {
t.Errorf("test 2: got status %v, want %v", g, e)
}
res.Body.Close()
}
func TestRequestLimit_h1(t *testing.T) { testRequestLimit(t, h1Mode) }
func TestRequestLimit_h2(t *testing.T) { testRequestLimit(t, h2Mode) }
func testRequestLimit(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
t.Fatalf("didn't expect to get request in Handler")
}))
defer cst.close()
req, _ := NewRequest("GET", cst.ts.URL, nil)
var bytesPerHeader = len("header12345: val12345\r\n")
for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ {
req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i))
}
res, err := cst.c.Do(req)
if err != nil {
// Some HTTP clients may fail on this undefined behavior (server replying and
// closing the connection while the request is still being written), but
// we do support it (at least currently), so we expect a response below.
t.Fatalf("Do: %v", err)
}
defer res.Body.Close()
if res.StatusCode != 431 {
t.Fatalf("expected 431 response status; got: %d %s", res.StatusCode, res.Status)
}
}
type neverEnding byte
func (b neverEnding) Read(p []byte) (n int, err error) {
for i := range p {
p[i] = byte(b)
}
return len(p), nil
}
type countReader struct {
r io.Reader
n *int64
}
func (cr countReader) Read(p []byte) (n int, err error) {
n, err = cr.r.Read(p)
atomic.AddInt64(cr.n, int64(n))
return
}
func TestRequestBodyLimit_h1(t *testing.T) { testRequestBodyLimit(t, h1Mode) }
func TestRequestBodyLimit_h2(t *testing.T) { testRequestBodyLimit(t, h2Mode) }
func testRequestBodyLimit(t *testing.T, h2 bool) {
defer afterTest(t)
const limit = 1 << 20
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
r.Body = MaxBytesReader(w, r.Body, limit)
n, err := io.Copy(ioutil.Discard, r.Body)
if err == nil {
t.Errorf("expected error from io.Copy")
}
if n != limit {
t.Errorf("io.Copy = %d, want %d", n, limit)
}
}))
defer cst.close()
nWritten := new(int64)
req, _ := NewRequest("POST", cst.ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200))
// Send the POST, but don't care whether it succeeds or not. The
// remote side is going to reply and then close the TCP
// connection, and HTTP doesn't really define if that's
// allowed or not. Some HTTP clients will get the response
// and some (like ours, currently) will complain that the
// request write failed, without reading the response.
//
// But that's okay, since what we're really testing is that
// the remote side hung up on us before we wrote too much.
_, _ = cst.c.Do(req)
if atomic.LoadInt64(nWritten) > limit*100 {
t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
limit, nWritten)
}
}
// TestClientWriteShutdown tests that if the client shuts down the write
// side of their TCP connection, the server doesn't send a 400 Bad Request.
func TestClientWriteShutdown(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
err = conn.(*net.TCPConn).CloseWrite()
if err != nil {
t.Fatalf("Dial: %v", err)
}
donec := make(chan bool)
go func() {
defer close(donec)
bs, err := ioutil.ReadAll(conn)
if err != nil {
t.Fatalf("ReadAll: %v", err)
}
got := string(bs)
if got != "" {
t.Errorf("read %q from server; want nothing", got)
}
}()
select {
case <-donec:
case <-time.After(10 * time.Second):
t.Fatalf("timeout")
}
}
// Tests that chunked server responses that write 1 byte at a time are
// buffered before chunk headers are added, not after chunk headers.
func TestServerBufferedChunking(t *testing.T) {
conn := new(testConn)
conn.readBuf.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
conn.closec = make(chan bool, 1)
ls := &oneConnListener{conn}
go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
rw.(Flusher).Flush() // force the Header to be sent, in chunking mode, not counting the length
rw.Write([]byte{'x'})
rw.Write([]byte{'y'})
rw.Write([]byte{'z'})
}))
<-conn.closec
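// Chunked framing is "<hex length>\r\n<data>\r\n" per chunk, ending with the
// zero-length chunk "0\r\n\r\n". A properly buffered response therefore ends
// with one 3-byte chunk ("3\r\nxyz\r\n") rather than three 1-byte chunks.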
if !bytes.HasSuffix(conn.writeBuf.Bytes(), []byte("\r\n\r\n3\r\nxyz\r\n0\r\n\r\n")) {
t.Errorf("response didn't end with a single 3 byte 'xyz' chunk; got:\n%q",
conn.writeBuf.Bytes())
}
}
// Tests that the server flushes its response headers out when it's
// ignoring the response body and waits a bit before forcefully
// closing the TCP connection, causing the client to get a RST.
// See https://golang.org/issue/3595
func TestServerGracefulClose(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
Error(w, "bye", StatusUnauthorized)
}))
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
const bodySize = 5 << 20
req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
for i := 0; i < bodySize; i++ {
req = append(req, 'x')
}
writeErr := make(chan error)
go func() {
_, err := conn.Write(req)
writeErr <- err
}()
br := bufio.NewReader(conn)
lineNum := 0
for {
line, err := br.ReadString('\n')
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("ReadLine: %v", err)
}
lineNum++
if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
t.Errorf("Response line = %q; want a 401", line)
}
}
// Wait for write to finish. This is a broken pipe on both
// Darwin and Linux, but checking this isn't the point of
// the test.
<-writeErr
}
func TestCaseSensitiveMethod_h1(t *testing.T) { testCaseSensitiveMethod(t, h1Mode) }
func TestCaseSensitiveMethod_h2(t *testing.T) { testCaseSensitiveMethod(t, h2Mode) }
func testCaseSensitiveMethod(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
if r.Method != "get" {
t.Errorf(`Got method %q; want "get"`, r.Method)
}
}))
defer cst.close()
req, _ := NewRequest("get", cst.ts.URL, nil)
res, err := cst.c.Do(req)
if err != nil {
t.Error(err)
return
}
res.Body.Close()
}
// TestContentLengthZero tests that for both an HTTP/1.0 and HTTP/1.1
// request (both keep-alive), when a Handler never writes any
// response, the net/http package adds a "Content-Length: 0" response
// header.
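// Without a Content-Length (or chunked framing) a keep-alive client couldn't
// tell where the response ends, so the server adds the explicit zero. On the
// wire the response looks roughly like:
//
//	HTTP/1.1 200 OK
//	Date: ...
//	Content-Length: 0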
func TestContentLengthZero(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {}))
defer ts.Close()
for _, version := range []string{"HTTP/1.0", "HTTP/1.1"} {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("error dialing: %v", err)
}
_, err = fmt.Fprintf(conn, "GET / %v\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n", version)
if err != nil {
t.Fatalf("error writing: %v", err)
}
req, _ := NewRequest("GET", "/", nil)
res, err := ReadResponse(bufio.NewReader(conn), req)
if err != nil {
t.Fatalf("error reading response: %v", err)
}
if te := res.TransferEncoding; len(te) > 0 {
t.Errorf("For version %q, Transfer-Encoding = %q; want none", version, te)
}
if cl := res.ContentLength; cl != 0 {
t.Errorf("For version %q, Content-Length = %v; want 0", version, cl)
}
conn.Close()
}
}
func TestCloseNotifier(t *testing.T) {
defer afterTest(t)
gotReq := make(chan bool, 1)
sawClose := make(chan bool, 1)
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
gotReq <- true
cc := rw.(CloseNotifier).CloseNotify()
<-cc
sawClose <- true
}))
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("error dialing: %v", err)
}
diec := make(chan bool)
go func() {
_, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n")
if err != nil {
t.Fatal(err)
}
<-diec
conn.Close()
}()
For:
for {
select {
case <-gotReq:
diec <- true
case <-sawClose:
break For
case <-time.After(5 * time.Second):
t.Fatal("timeout")
}
}
ts.Close()
}
// Tests that a pipelined request causes the first request's Handler's CloseNotify
// channel to fire. Previously it deadlocked.
//
// Issue 13165
func TestCloseNotifierPipelined(t *testing.T) {
defer afterTest(t)
gotReq := make(chan bool, 2)
sawClose := make(chan bool, 2)
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
gotReq <- true
cc := rw.(CloseNotifier).CloseNotify()
<-cc
sawClose <- true
}))
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("error dialing: %v", err)
}
diec := make(chan bool, 1)
go func() {
const req = "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n"
_, err = io.WriteString(conn, req+req) // two requests
if err != nil {
t.Fatal(err)
}
<-diec
conn.Close()
}()
reqs := 0
closes := 0
For:
for {
select {
case <-gotReq:
reqs++
if reqs > 2 {
t.Fatal("too many requests")
} else if reqs > 1 {
diec <- true
}
case <-sawClose:
closes++
if closes > 1 {
break For
}
case <-time.After(5 * time.Second):
ts.CloseClientConnections()
t.Fatal("timeout")
}
}
ts.Close()
}
func TestCloseNotifierChanLeak(t *testing.T) {
defer afterTest(t)
req := reqBytes("GET / HTTP/1.0\nHost: golang.org")
for i := 0; i < 20; i++ {
var output bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &output,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
// Ignore the return value and never read from
// it, testing that we don't leak goroutines
// on the sending side:
_ = rw.(CloseNotifier).CloseNotify()
})
go Serve(ln, handler)
<-conn.closec
}
}
// Tests that we can use CloseNotifier in one request, and later call Hijack
// on a second request on the same connection.
//
// It also tests that, when CloseNotifier doesn't fire, the connReader
// stitches its background 1-byte read back together with the rest of the
// second HTTP request read later.
//
// Issue 9763.
// HTTP/1-only test. (http2 doesn't have Hijack)
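// (Background, as of this code's vintage: CloseNotify is implemented by the
// connReader starting a background 1-byte read on the connection to detect
// the peer going away. If that byte turns out to be the start of the next
// request instead, the connReader has to hand it back so the following
// request still parses; that's the "stitching" referred to above.)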
func TestHijackAfterCloseNotifier(t *testing.T) {
defer afterTest(t)
script := make(chan string, 2)
script <- "closenotify"
script <- "hijack"
close(script)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
plan := <-script
switch plan {
default:
panic("bogus plan; too many requests")
case "closenotify":
w.(CloseNotifier).CloseNotify() // discard result
w.Header().Set("X-Addr", r.RemoteAddr)
case "hijack":
c, _, err := w.(Hijacker).Hijack()
if err != nil {
t.Errorf("Hijack in Handler: %v", err)
return
}
if _, ok := c.(*net.TCPConn); !ok {
// Verify it's not wrapped in some type.
// Not strictly a go1 compat issue, but in practice it probably is.
t.Errorf("type of hijacked conn is %T; want *net.TCPConn", c)
}
fmt.Fprintf(c, "HTTP/1.0 200 OK\r\nX-Addr: %v\r\nContent-Length: 0\r\n\r\n", r.RemoteAddr)
c.Close()
return
}
}))
defer ts.Close()
res1, err := Get(ts.URL)
if err != nil {
log.Fatal(err)
}
res2, err := Get(ts.URL)
if err != nil {
log.Fatal(err)
}
addr1 := res1.Header.Get("X-Addr")
addr2 := res2.Header.Get("X-Addr")
if addr1 == "" || addr1 != addr2 {
t.Errorf("addr1, addr2 = %q, %q; want same", addr1, addr2)
}
}
func TestHijackBeforeRequestBodyRead(t *testing.T) {
defer afterTest(t)
var requestBody = bytes.Repeat([]byte("a"), 1<<20)
bodyOkay := make(chan bool, 1)
gotCloseNotify := make(chan bool, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
defer close(bodyOkay) // caller will read false if nothing else
reqBody := r.Body
r.Body = nil // to test that server.go doesn't use this value.
gone := w.(CloseNotifier).CloseNotify()
slurp, err := ioutil.ReadAll(reqBody)
if err != nil {
t.Errorf("Body read: %v", err)
return
}
if len(slurp) != len(requestBody) {
t.Errorf("Backend read %d request body bytes; want %d", len(slurp), len(requestBody))
return
}
if !bytes.Equal(slurp, requestBody) {
t.Error("Backend read wrong request body.") // 1MB; omitting details
return
}
bodyOkay <- true
select {
case <-gone:
gotCloseNotify <- true
case <-time.After(5 * time.Second):
gotCloseNotify <- false
}
}))
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
fmt.Fprintf(conn, "POST / HTTP/1.1\r\nHost: foo\r\nContent-Length: %d\r\n\r\n%s",
len(requestBody), requestBody)
if !<-bodyOkay {
// already failed.
return
}
conn.Close()
if !<-gotCloseNotify {
t.Error("timeout waiting for CloseNotify")
}
}
func TestOptions(t *testing.T) {
uric := make(chan string, 2) // only expect 1, but leave space for 2
mux := NewServeMux()
mux.HandleFunc("/", func(w ResponseWriter, r *Request) {
uric <- r.RequestURI
})
ts := httptest.NewServer(mux)
defer ts.Close()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
// An OPTIONS * request should succeed.
_, err = conn.Write([]byte("OPTIONS * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
if err != nil {
t.Fatal(err)
}
br := bufio.NewReader(conn)
res, err := ReadResponse(br, &Request{Method: "OPTIONS"})
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Errorf("Got non-200 response to OPTIONS *: %#v", res)
}
// A GET * request on a ServeMux should fail.
_, err = conn.Write([]byte("GET * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
if err != nil {
t.Fatal(err)
}
res, err = ReadResponse(br, &Request{Method: "GET"})
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 400 {
t.Errorf("Got non-400 response to GET *: %#v", res)
}
res, err = Get(ts.URL + "/second")
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if got := <-uric; got != "/second" {
t.Errorf("Handler saw request for %q; want /second", got)
}
}
// Tests regarding the ordering of Write, WriteHeader, Header, and
// Flush calls. In Go 1.0, rw.WriteHeader immediately flushed the
// (*response).header to the wire. In Go 1.1, the actual wire flush is
// delayed, so we could maybe tack on a Content-Length and better
// Content-Type after we see more (or all) of the output. To preserve
// compatibility with Go 1, we need to be careful to track which
// headers were live at the time of WriteHeader, so we write the same
// ones, even if the handler modifies them (~erroneously) after the
// first Write.
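// A minimal sketch of the rule (hypothetical handler, not part of the tests
// below): headers must be set before the first Write/WriteHeader, because
// that call snapshots them for the wire.
//
//	func exampleHandler(w ResponseWriter, r *Request) {
//		w.Header().Set("Content-Type", "application/json") // effective
//		w.Write([]byte(`{"ok":true}`))                      // snapshots headers
//		w.Header().Set("X-Too-Late", "1")                   // ignored
//	}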
func TestHeaderToWire(t *testing.T) {
tests := []struct {
name string
handler func(ResponseWriter, *Request)
check func(output string) error
}{
{
name: "write without Header",
handler: func(rw ResponseWriter, r *Request) {
rw.Write([]byte("hello world"))
},
check: func(got string) error {
if !strings.Contains(got, "Content-Length:") {
return errors.New("no content-length")
}
if !strings.Contains(got, "Content-Type: text/plain") {
return errors.New("no content-type")
}
return nil
},
},
{
name: "Header mutation before write",
handler: func(rw ResponseWriter, r *Request) {
h := rw.Header()
h.Set("Content-Type", "some/type")
rw.Write([]byte("hello world"))
h.Set("Too-Late", "bogus")
},
check: func(got string) error {
if !strings.Contains(got, "Content-Length:") {
return errors.New("no content-length")
}
if !strings.Contains(got, "Content-Type: some/type") {
return errors.New("wrong content-type")
}
if strings.Contains(got, "Too-Late") {
return errors.New("don't want too-late header")
}
return nil
},
},
{
name: "write then useless Header mutation",
handler: func(rw ResponseWriter, r *Request) {
rw.Write([]byte("hello world"))
rw.Header().Set("Too-Late", "Write already wrote headers")
},
check: func(got string) error {
if strings.Contains(got, "Too-Late") {
return errors.New("header appeared from after WriteHeader")
}
return nil
},
},
{
name: "flush then write",
handler: func(rw ResponseWriter, r *Request) {
rw.(Flusher).Flush()
rw.Write([]byte("post-flush"))
rw.Header().Set("Too-Late", "Write already wrote headers")
},
check: func(got string) error {
if !strings.Contains(got, "Transfer-Encoding: chunked") {
return errors.New("not chunked")
}
if strings.Contains(got, "Too-Late") {
return errors.New("header appeared from after WriteHeader")
}
return nil
},
},
{
name: "header then flush",
handler: func(rw ResponseWriter, r *Request) {
rw.Header().Set("Content-Type", "some/type")
rw.(Flusher).Flush()
rw.Write([]byte("post-flush"))
rw.Header().Set("Too-Late", "Write already wrote headers")
},
check: func(got string) error {
if !strings.Contains(got, "Transfer-Encoding: chunked") {
return errors.New("not chunked")
}
if strings.Contains(got, "Too-Late") {
return errors.New("header appeared from after WriteHeader")
}
if !strings.Contains(got, "Content-Type: some/type") {
return errors.New("wrong content-type")
}
return nil
},
},
{
name: "sniff-on-first-write content-type",
handler: func(rw ResponseWriter, r *Request) {
rw.Write([]byte("<html><head></head><body>some html</body></html>"))
rw.Header().Set("Content-Type", "x/wrong")
},
check: func(got string) error {
if !strings.Contains(got, "Content-Type: text/html") {
return errors.New("wrong content-type; want html")
}
return nil
},
},
{
name: "explicit content-type wins",
handler: func(rw ResponseWriter, r *Request) {
rw.Header().Set("Content-Type", "some/type")
rw.Write([]byte("<html><head></head><body>some html</body></html>"))
},
check: func(got string) error {
if !strings.Contains(got, "Content-Type: some/type") {
return errors.New("wrong content-type; want html")
}
return nil
},
},
{
name: "empty handler",
handler: func(rw ResponseWriter, r *Request) {
},
check: func(got string) error {
if !strings.Contains(got, "Content-Type: text/plain") {
return errors.New("wrong content-type; want text/plain")
}
if !strings.Contains(got, "Content-Length: 0") {
return errors.New("want 0 content-length")
}
return nil
},
},
{
name: "only Header, no write",
handler: func(rw ResponseWriter, r *Request) {
rw.Header().Set("Some-Header", "some-value")
},
check: func(got string) error {
if !strings.Contains(got, "Some-Header") {
return errors.New("didn't get header")
}
return nil
},
},
{
name: "WriteHeader call",
handler: func(rw ResponseWriter, r *Request) {
rw.WriteHeader(404)
rw.Header().Set("Too-Late", "some-value")
},
check: func(got string) error {
if !strings.Contains(got, "404") {
return errors.New("wrong status")
}
if strings.Contains(got, "Too-Late") {
return errors.New("shouldn't have seen Too-Late")
}
return nil
},
},
}
for _, tc := range tests {
ht := newHandlerTest(HandlerFunc(tc.handler))
got := ht.rawResponse("GET / HTTP/1.1\nHost: golang.org")
if err := tc.check(got); err != nil {
t.Errorf("%s: %v\nGot response:\n%s", tc.name, err, got)
}
}
}
// goTimeout runs f, failing t if f takes more than ns to complete.
func goTimeout(t *testing.T, d time.Duration, f func()) {
ch := make(chan bool, 2)
timer := time.AfterFunc(d, func() {
t.Errorf("Timeout expired after %v", d)
ch <- true
})
defer timer.Stop()
go func() {
defer func() { ch <- true }()
f()
}()
<-ch
}
type errorListener struct {
errs []error
}
func (l *errorListener) Accept() (c net.Conn, err error) {
if len(l.errs) == 0 {
return nil, io.EOF
}
err = l.errs[0]
l.errs = l.errs[1:]
return
}
func (l *errorListener) Close() error {
return nil
}
func (l *errorListener) Addr() net.Addr {
return dummyAddr("test-address")
}
func TestAcceptMaxFds(t *testing.T) {
log.SetOutput(ioutil.Discard) // is noisy otherwise
defer log.SetOutput(os.Stderr)
ln := &errorListener{[]error{
&net.OpError{
Op: "accept",
Err: syscall.EMFILE,
}}}
err := Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
if err != io.EOF {
t.Errorf("got error %v, want EOF", err)
}
}
func TestWriteAfterHijack(t *testing.T) {
req := reqBytes("GET / HTTP/1.1\nHost: golang.org")
var buf bytes.Buffer
wrotec := make(chan bool, 1)
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &buf,
closec: make(chan bool, 1),
}
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
conn, bufrw, err := rw.(Hijacker).Hijack()
if err != nil {
t.Error(err)
return
}
go func() {
bufrw.Write([]byte("[hijack-to-bufw]"))
bufrw.Flush()
conn.Write([]byte("[hijack-to-conn]"))
conn.Close()
wrotec <- true
}()
})
ln := &oneConnListener{conn: conn}
go Serve(ln, handler)
<-conn.closec
<-wrotec
if g, w := buf.String(), "[hijack-to-bufw][hijack-to-conn]"; g != w {
t.Errorf("wrote %q; want %q", g, w)
}
}
func TestDoubleHijack(t *testing.T) {
req := reqBytes("GET / HTTP/1.1\nHost: golang.org")
var buf bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &buf,
closec: make(chan bool, 1),
}
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
conn, _, err := rw.(Hijacker).Hijack()
if err != nil {
t.Error(err)
return
}
_, _, err = rw.(Hijacker).Hijack()
if err == nil {
t.Errorf("got err = nil; want err != nil")
}
conn.Close()
})
ln := &oneConnListener{conn: conn}
go Serve(ln, handler)
<-conn.closec
}
// https://golang.org/issue/5955
// Note that this does not test the "request too large"
// exit path from the http server. This is intentional;
// not sending Connection: close is just a minor wire
// optimization and is pointless if dealing with a
// badly behaved client.
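// Background: HTTP/1.0 connections close after the response by default; they
// stay open only if the client sends "Connection: keep-alive" and the server
// echoes it. That's why the plain HTTP/1.0 cases below expect no Connection
// header at all, and only the keep-alive request expects one back.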
func TestHTTP10ConnectionHeader(t *testing.T) {
defer afterTest(t)
mux := NewServeMux()
mux.Handle("/", HandlerFunc(func(ResponseWriter, *Request) {}))
ts := httptest.NewServer(mux)
defer ts.Close()
// net/http uses HTTP/1.1 for requests, so write requests manually
tests := []struct {
req string // raw http request
expect []string // expected Connection header(s)
}{
{
req: "GET / HTTP/1.0\r\n\r\n",
expect: nil,
},
{
req: "OPTIONS * HTTP/1.0\r\n\r\n",
expect: nil,
},
{
req: "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n",
expect: []string{"keep-alive"},
},
}
for _, tt := range tests {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal("dial err:", err)
}
_, err = fmt.Fprint(conn, tt.req)
if err != nil {
t.Fatal("conn write err:", err)
}
resp, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "GET"})
if err != nil {
t.Fatal("ReadResponse err:", err)
}
conn.Close()
resp.Body.Close()
got := resp.Header["Connection"]
if !reflect.DeepEqual(got, tt.expect) {
t.Errorf("wrong Connection headers for request %q. Got %q expect %q", tt.req, got, tt.expect)
}
}
}
// See golang.org/issue/5660
func TestServerReaderFromOrder_h1(t *testing.T) { testServerReaderFromOrder(t, h1Mode) }
func TestServerReaderFromOrder_h2(t *testing.T) { testServerReaderFromOrder(t, h2Mode) }
func testServerReaderFromOrder(t *testing.T, h2 bool) {
defer afterTest(t)
pr, pw := io.Pipe()
const size = 3 << 20
cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
rw.Header().Set("Content-Type", "text/plain") // prevent sniffing path
done := make(chan bool)
go func() {
io.Copy(rw, pr)
close(done)
}()
time.Sleep(25 * time.Millisecond) // give Copy a chance to break things
n, err := io.Copy(ioutil.Discard, req.Body)
if err != nil {
t.Errorf("handler Copy: %v", err)
return
}
if n != size {
t.Errorf("handler Copy = %d; want %d", n, size)
}
pw.Write([]byte("hi"))
pw.Close()
<-done
}))
defer cst.close()
req, err := NewRequest("POST", cst.ts.URL, io.LimitReader(neverEnding('a'), size))
if err != nil {
t.Fatal(err)
}
res, err := cst.c.Do(req)
if err != nil {
t.Fatal(err)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if string(all) != "hi" {
t.Errorf("Body = %q; want hi", all)
}
}
// Issue 6157, Issue 6685
func TestCodesPreventingContentTypeAndBody(t *testing.T) {
for _, code := range []int{StatusNotModified, StatusNoContent, StatusContinue} {
ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
if r.URL.Path == "/header" {
w.Header().Set("Content-Length", "123")
}
w.WriteHeader(code)
if r.URL.Path == "/more" {
w.Write([]byte("stuff"))
}
}))
for _, req := range []string{
"GET / HTTP/1.0",
"GET /header HTTP/1.0",
"GET /more HTTP/1.0",
"GET / HTTP/1.1\nHost: foo",
"GET /header HTTP/1.1\nHost: foo",
"GET /more HTTP/1.1\nHost: foo",
} {
got := ht.rawResponse(req)
wantStatus := fmt.Sprintf("%d %s", code, StatusText(code))
if !strings.Contains(got, wantStatus) {
t.Errorf("Code %d: Wanted %q Modified for %q: %s", code, wantStatus, req, got)
} else if strings.Contains(got, "Content-Length") {
t.Errorf("Code %d: Got a Content-Length from %q: %s", code, req, got)
} else if strings.Contains(got, "stuff") {
t.Errorf("Code %d: Response contains a body from %q: %s", code, req, got)
}
}
}
}
func TestContentTypeOkayOn204(t *testing.T) {
ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", "123") // suppressed
w.Header().Set("Content-Type", "foo/bar")
w.WriteHeader(204)
}))
got := ht.rawResponse("GET / HTTP/1.1\nHost: foo")
if !strings.Contains(got, "Content-Type: foo/bar") {
t.Errorf("Response = %q; want Content-Type: foo/bar", got)
}
if strings.Contains(got, "Content-Length: 123") {
t.Errorf("Response = %q; don't want a Content-Length", got)
}
}
// Issue 6995
// A server Handler can receive a Request, and then turn around and
// give a copy of that Request.Body out to the Transport (e.g. any
// proxy). So then two people own that Request.Body (both the server
// and the http client), and both think they can close it on failure.
// Therefore, all incoming server request Bodies need to be thread-safe.
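// The proxy handler below reproduces that situation: it hands the inbound
// req.Body to an outbound request on the Transport and then cancels that
// request, so the Transport and the server may both close the same Body
// concurrently.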
func TestTransportAndServerSharedBodyRace_h1(t *testing.T) {
testTransportAndServerSharedBodyRace(t, h1Mode)
}
func TestTransportAndServerSharedBodyRace_h2(t *testing.T) {
testTransportAndServerSharedBodyRace(t, h2Mode)
}
func testTransportAndServerSharedBodyRace(t *testing.T, h2 bool) {
defer afterTest(t)
const bodySize = 1 << 20
// errorf is like t.Errorf, but also writes to println. When this test
// fails it tends to hang, so the println output is useful for debugging.
// It was added "temporarily" enough times that it's now here full time.
errorf := func(format string, args ...interface{}) {
v := fmt.Sprintf(format, args...)
println(v)
t.Error(v)
}
unblockBackend := make(chan bool)
backend := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
gone := rw.(CloseNotifier).CloseNotify()
didCopy := make(chan interface{})
go func() {
n, err := io.CopyN(rw, req.Body, bodySize)
didCopy <- []interface{}{n, err}
}()
isGone := false
Loop:
for {
select {
case <-didCopy:
break Loop
case <-gone:
isGone = true
case <-time.After(time.Second):
println("1 second passes in backend, proxygone=", isGone)
}
}
<-unblockBackend
}))
var quitTimer *time.Timer
defer func() { quitTimer.Stop() }()
defer backend.close()
backendRespc := make(chan *Response, 1)
var proxy *clientServerTest
proxy = newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
req2, _ := NewRequest("POST", backend.ts.URL, req.Body)
req2.ContentLength = bodySize
cancel := make(chan struct{})
req2.Cancel = cancel
bresp, err := proxy.c.Do(req2)
if err != nil {
errorf("Proxy outbound request: %v", err)
return
}
_, err = io.CopyN(ioutil.Discard, bresp.Body, bodySize/2)
if err != nil {
errorf("Proxy copy error: %v", err)
return
}
backendRespc <- bresp // to close later
// Try to cause a race: Both the Transport and the proxy handler's Server
// will try to read/close req.Body (aka req2.Body)
if h2 {
close(cancel)
} else {
proxy.c.Transport.(*Transport).CancelRequest(req2)
}
rw.Write([]byte("OK"))
}))
defer proxy.close()
defer func() {
// Before we shut down our two httptest.Servers, start a timer.
// We choose 7 seconds because httptest.Server starts logging
// warnings to stderr at 5 seconds. If we don't disarm this bomb
// in 7 seconds (after the two httptest.Server.Close calls above),
// then we explode with stacks.
quitTimer = time.AfterFunc(7*time.Second, func() {
debug.SetTraceback("ALL")
stacks := make([]byte, 1<<20)
stacks = stacks[:runtime.Stack(stacks, true)]
fmt.Fprintf(os.Stderr, "%s", stacks)
log.Fatalf("Timeout.")
})
}()
defer close(unblockBackend)
req, _ := NewRequest("POST", proxy.ts.URL, io.LimitReader(neverEnding('a'), bodySize))
res, err := proxy.c.Do(req)
if err != nil {
t.Fatalf("Original request: %v", err)
}
// Cleanup, so we don't leak goroutines.
res.Body.Close()
select {
case res := <-backendRespc:
res.Body.Close()
default:
// We failed earlier. (e.g. on proxy.c.Do(req2))
}
}
// Test that a hanging Request.Body.Read from another goroutine can't
// cause the Handler goroutine's Request.Body.Close to block.
func TestRequestBodyCloseDoesntBlock(t *testing.T) {
t.Skipf("Skipping known issue; see golang.org/issue/7121")
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer afterTest(t)
readErrCh := make(chan error, 1)
errCh := make(chan error, 2)
server := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
go func(body io.Reader) {
_, err := body.Read(make([]byte, 100))
readErrCh <- err
}(req.Body)
time.Sleep(500 * time.Millisecond)
}))
defer server.Close()
closeConn := make(chan bool)
defer close(closeConn)
go func() {
conn, err := net.Dial("tcp", server.Listener.Addr().String())
if err != nil {
errCh <- err
return
}
defer conn.Close()
_, err = conn.Write([]byte("POST / HTTP/1.1\r\nConnection: close\r\nHost: foo\r\nContent-Length: 100000\r\n\r\n"))
if err != nil {
errCh <- err
return
}
// And now just block, making the server block on our
// 100000 bytes of body that will never arrive.
<-closeConn
}()
select {
case err := <-readErrCh:
if err == nil {
t.Error("Read was nil. Expected error.")
}
case err := <-errCh:
t.Error(err)
case <-time.After(5 * time.Second):
t.Error("timeout")
}
}
// test that ResponseWriter implements io.stringWriter.
func TestResponseWriterWriteString(t *testing.T) {
okc := make(chan bool, 1)
ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
type stringWriter interface {
WriteString(s string) (n int, err error)
}
_, ok := w.(stringWriter)
okc <- ok
}))
ht.rawResponse("GET / HTTP/1.0")
select {
case ok := <-okc:
if !ok {
t.Error("ResponseWriter did not implement io.stringWriter")
}
default:
t.Error("handler was never called")
}
}
func TestAppendTime(t *testing.T) {
var b [len(TimeFormat)]byte
t1 := time.Date(2013, 9, 21, 15, 41, 0, 0, time.FixedZone("CEST", 2*60*60))
res := ExportAppendTime(b[:0], t1)
t2, err := ParseTime(string(res))
if err != nil {
t.Fatalf("Error parsing time: %s", err)
}
if !t1.Equal(t2) {
t.Fatalf("Times differ; expected: %v, got %v (%s)", t1, t2, string(res))
}
}
func TestServerConnState(t *testing.T) {
defer afterTest(t)
handler := map[string]func(w ResponseWriter, r *Request){
"/": func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "Hello.")
},
"/close": func(w ResponseWriter, r *Request) {
w.Header().Set("Connection", "close")
fmt.Fprintf(w, "Hello.")
},
"/hijack": func(w ResponseWriter, r *Request) {
c, _, _ := w.(Hijacker).Hijack()
c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello."))
c.Close()
},
"/hijack-panic": func(w ResponseWriter, r *Request) {
c, _, _ := w.(Hijacker).Hijack()
c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello."))
c.Close()
panic("intentional panic")
},
}
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
handler[r.URL.Path](w, r)
}))
defer ts.Close()
var mu sync.Mutex // guard stateLog and connID
var stateLog = map[int][]ConnState{}
var connID = map[net.Conn]int{}
ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
ts.Config.ConnState = func(c net.Conn, state ConnState) {
if c == nil {
t.Errorf("nil conn seen in state %s", state)
return
}
mu.Lock()
defer mu.Unlock()
id, ok := connID[c]
if !ok {
id = len(connID) + 1
connID[c] = id
}
stateLog[id] = append(stateLog[id], state)
}
ts.Start()
mustGet(t, ts.URL+"/")
mustGet(t, ts.URL+"/close")
mustGet(t, ts.URL+"/")
mustGet(t, ts.URL+"/", "Connection", "close")
mustGet(t, ts.URL+"/hijack")
mustGet(t, ts.URL+"/hijack-panic")
// New->Closed
{
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
c.Close()
}
// New->Active->Closed
{
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(c, "BOGUS REQUEST\r\n\r\n"); err != nil {
t.Fatal(err)
}
c.Read(make([]byte, 1)) // block until server hangs up on us
c.Close()
}
// New->Idle->Closed
{
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil {
t.Fatal(err)
}
res, err := ReadResponse(bufio.NewReader(c), nil)
if err != nil {
t.Fatal(err)
}
if _, err := io.Copy(ioutil.Discard, res.Body); err != nil {
t.Fatal(err)
}
c.Close()
}
want := map[int][]ConnState{
1: {StateNew, StateActive, StateIdle, StateActive, StateClosed},
2: {StateNew, StateActive, StateIdle, StateActive, StateClosed},
3: {StateNew, StateActive, StateHijacked},
4: {StateNew, StateActive, StateHijacked},
5: {StateNew, StateClosed},
6: {StateNew, StateActive, StateClosed},
7: {StateNew, StateActive, StateIdle, StateClosed},
}
logString := func(m map[int][]ConnState) string {
var b bytes.Buffer
var keys []int
for id := range m {
keys = append(keys, id)
}
sort.Ints(keys)
for _, id := range keys {
fmt.Fprintf(&b, "Conn %d: ", id)
for _, s := range m[id] {
fmt.Fprintf(&b, "%s ", s)
}
b.WriteString("\n")
}
return b.String()
}
for i := 0; i < 5; i++ {
time.Sleep(time.Duration(i) * 50 * time.Millisecond)
mu.Lock()
match := reflect.DeepEqual(stateLog, want)
mu.Unlock()
if match {
return
}
}
mu.Lock()
t.Errorf("Unexpected events.\nGot log: %s\n Want: %s\n", logString(stateLog), logString(want))
mu.Unlock()
}
func mustGet(t *testing.T, url string, headers ...string) {
req, err := NewRequest("GET", url, nil)
if err != nil {
t.Fatal(err)
}
for len(headers) > 0 {
req.Header.Add(headers[0], headers[1])
headers = headers[2:]
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Errorf("Error fetching %s: %v", url, err)
return
}
_, err = ioutil.ReadAll(res.Body)
defer res.Body.Close()
if err != nil {
t.Errorf("Error reading %s: %v", url, err)
}
}
func TestServerKeepAlivesEnabled(t *testing.T) {
defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
ts.Config.SetKeepAlivesEnabled(false)
ts.Start()
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
if !res.Close {
t.Errorf("Body.Close == false; want true")
}
}
// golang.org/issue/7856
func TestServerEmptyBodyRace_h1(t *testing.T) { testServerEmptyBodyRace(t, h1Mode) }
func TestServerEmptyBodyRace_h2(t *testing.T) { testServerEmptyBodyRace(t, h2Mode) }
func testServerEmptyBodyRace(t *testing.T, h2 bool) {
defer afterTest(t)
var n int32
cst := newClientServerTest(t, h2, HandlerFunc(func(rw ResponseWriter, req *Request) {
atomic.AddInt32(&n, 1)
}))
defer cst.close()
var wg sync.WaitGroup
const reqs = 20
for i := 0; i < reqs; i++ {
wg.Add(1)
go func() {
defer wg.Done()
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Error(err)
return
}
defer res.Body.Close()
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Error(err)
return
}
}()
}
wg.Wait()
if got := atomic.LoadInt32(&n); got != reqs {
t.Errorf("handler ran %d times; want %d", got, reqs)
}
}
func TestServerConnStateNew(t *testing.T) {
sawNew := false // if the test is buggy, we'll race on this variable.
srv := &Server{
ConnState: func(c net.Conn, state ConnState) {
if state == StateNew {
sawNew = true // testing that this write isn't racy
}
},
Handler: HandlerFunc(func(w ResponseWriter, r *Request) {}), // irrelevant
}
srv.Serve(&oneConnListener{
conn: &rwTestConn{
Reader: strings.NewReader("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"),
Writer: ioutil.Discard,
},
})
if !sawNew { // testing that this read isn't racy
t.Error("StateNew not seen")
}
}
type closeWriteTestConn struct {
rwTestConn
didCloseWrite bool
}
func (c *closeWriteTestConn) CloseWrite() error {
c.didCloseWrite = true
return nil
}
func TestCloseWrite(t *testing.T) {
var srv Server
var testConn closeWriteTestConn
c := ExportServerNewConn(&srv, &testConn)
ExportCloseWriteAndWait(c)
if !testConn.didCloseWrite {
t.Error("didn't see CloseWrite call")
}
}
// This verifies that a handler can Flush and then Hijack.
//
// A similar test crashed once during development, but it was only
// testing this tangentially and temporarily until another TODO was
// fixed.
//
// So add an explicit test for this.
func TestServerFlushAndHijack(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
io.WriteString(w, "Hello, ")
w.(Flusher).Flush()
conn, buf, _ := w.(Hijacker).Hijack()
buf.WriteString("6\r\nworld!\r\n0\r\n\r\n")
if err := buf.Flush(); err != nil {
t.Error(err)
}
if err := conn.Close(); err != nil {
t.Error(err)
}
}))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
all, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if want := "Hello, world!"; string(all) != want {
t.Errorf("Got %q; want %q", all, want)
}
}
// golang.org/issue/8534 -- the Server shouldn't reuse a connection
// for keep-alive after it's seen any Write error (e.g. a timeout) on
// that net.Conn.
//
// To test, verify we don't timeout or see fewer unique client
// addresses (== unique connections) than requests.
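// The reasoning: after a Write error the state of the connection is unknown
// (the client may have seen a partial response), so reusing it for keep-alive
// could interleave responses. Closing it instead forces each subsequent
// request onto a fresh connection, which is what the RemoteAddr count checks.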
func TestServerKeepAliveAfterWriteError(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer afterTest(t)
const numReq = 3
addrc := make(chan string, numReq)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
addrc <- r.RemoteAddr
time.Sleep(500 * time.Millisecond)
w.(Flusher).Flush()
}))
ts.Config.WriteTimeout = 250 * time.Millisecond
ts.Start()
defer ts.Close()
errc := make(chan error, numReq)
go func() {
defer close(errc)
for i := 0; i < numReq; i++ {
res, err := Get(ts.URL)
if res != nil {
res.Body.Close()
}
errc <- err
}
}()
timeout := time.NewTimer(numReq * 2 * time.Second) // 4x overkill
defer timeout.Stop()
addrSeen := map[string]bool{}
numOkay := 0
for {
select {
case v := <-addrc:
addrSeen[v] = true
case err, ok := <-errc:
if !ok {
if len(addrSeen) != numReq {
t.Errorf("saw %d unique client addresses; want %d", len(addrSeen), numReq)
}
if numOkay != 0 {
t.Errorf("got %d successful client requests; want 0", numOkay)
}
return
}
if err == nil {
numOkay++
}
case <-timeout.C:
t.Fatal("timeout waiting for requests to complete")
}
}
}
// Issue 9987: shouldn't add automatic Content-Length (or
// Content-Type) if a Transfer-Encoding was set by the handler.
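// (Per RFC 7230, section 3.3.2, a sender must not include a Content-Length
// header field in any message that contains a Transfer-Encoding header field.)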
func TestNoContentLengthIfTransferEncoding(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Transfer-Encoding", "foo")
io.WriteString(w, "<html>")
}))
defer ts.Close()
c, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("Dial: %v", err)
}
defer c.Close()
if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil {
t.Fatal(err)
}
bs := bufio.NewScanner(c)
var got bytes.Buffer
for bs.Scan() {
if strings.TrimSpace(bs.Text()) == "" {
break
}
got.WriteString(bs.Text())
got.WriteByte('\n')
}
if err := bs.Err(); err != nil {
t.Fatal(err)
}
if strings.Contains(got.String(), "Content-Length") {
t.Errorf("Unexpected Content-Length in response headers: %s", got.String())
}
if strings.Contains(got.String(), "Content-Type") {
t.Errorf("Unexpected Content-Type in response headers: %s", got.String())
}
}
// tolerate extra CRLF(s) before Request-Line on subsequent requests on a conn
// Issue 10876.
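// (RFC 7230, section 3.5: a server SHOULD ignore at least one empty line
// received prior to the request-line.)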
func TestTolerateCRLFBeforeRequestLine(t *testing.T) {
req := []byte("POST / HTTP/1.1\r\nHost: golang.org\r\nContent-Length: 3\r\n\r\nABC" +
"\r\n\r\n" + // <-- this stuff is bogus, but we'll ignore it
"GET / HTTP/1.1\r\nHost: golang.org\r\n\r\n")
var buf bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &buf,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
numReq := 0
go Serve(ln, HandlerFunc(func(rw ResponseWriter, r *Request) {
numReq++
}))
<-conn.closec
if numReq != 2 {
t.Errorf("num requests = %d; want 2", numReq)
t.Logf("Res: %s", buf.Bytes())
}
}
func TestIssue13893_Expect100(t *testing.T) {
// test that the Server doesn't filter out Expect headers.
req := reqBytes(`PUT /readbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10

HelloWorld
`)
var buf bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &buf,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) {
if _, ok := r.Header["Expect"]; !ok {
t.Error("Expect header should not be filtered out")
}
}))
<-conn.closec
}
func TestIssue11549_Expect100(t *testing.T) {
req := reqBytes(`PUT /readbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10

HelloWorldPUT /noreadbody HTTP/1.1
User-Agent: PycURL/7.22.0
Host: 127.0.0.1:9000
Accept: */*
Expect: 100-continue
Content-Length: 10

GET /should-be-ignored HTTP/1.1
Host: foo
`)
var buf bytes.Buffer
conn := &rwTestConn{
Reader: bytes.NewReader(req),
Writer: &buf,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
numReq := 0
go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) {
numReq++
if r.URL.Path == "/readbody" {
ioutil.ReadAll(r.Body)
}
io.WriteString(w, "Hello world!")
}))
<-conn.closec
if numReq != 2 {
t.Errorf("num requests = %d; want 2", numReq)
}
if !strings.Contains(buf.String(), "Connection: close\r\n") {
t.Errorf("expected 'Connection: close' in response; got: %s", buf.String())
}
}
// If a Handler finishes and there's an unread request body with a very
// large declared Content-Length, verify that the server doesn't try to
// implicitly read it before replying.
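// (The server normally drains a small leftover body so the connection can be
// reused for keep-alive, but it skips that when the declared length is large;
// in this implementation the cutoff is on the order of a few hundred KB, since
// reading gigabytes just to salvage a connection isn't worthwhile.)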
func TestHandlerFinishSkipBigContentLengthRead(t *testing.T) {
conn := &testConn{closec: make(chan bool)}
conn.readBuf.Write([]byte(fmt.Sprintf(
"POST / HTTP/1.1\r\n" +
"Host: test\r\n" +
"Content-Length: 9999999999\r\n" +
"\r\n" + strings.Repeat("a", 1<<20))))
ls := &oneConnListener{conn}
var inHandlerLen int
go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
inHandlerLen = conn.readBuf.Len()
rw.WriteHeader(404)
}))
<-conn.closec
afterHandlerLen := conn.readBuf.Len()
if afterHandlerLen != inHandlerLen {
t.Errorf("unexpected implicit read. Read buffer went from %d -> %d", inHandlerLen, afterHandlerLen)
}
}
func TestHandlerSetsBodyNil_h1(t *testing.T) { testHandlerSetsBodyNil(t, h1Mode) }
func TestHandlerSetsBodyNil_h2(t *testing.T) { testHandlerSetsBodyNil(t, h2Mode) }
func testHandlerSetsBodyNil(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
r.Body = nil
fmt.Fprintf(w, "%v", r.RemoteAddr)
}))
defer cst.close()
get := func() string {
res, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
return string(slurp)
}
a, b := get(), get()
if a != b {
t.Errorf("Failed to reuse connections between requests: %v vs %v", a, b)
}
}
// Test that we validate the Host header.
// Issue 11206 (invalid bytes in Host) and 13624 (Host present in HTTP/1.1)
func TestServerValidatesHostHeader(t *testing.T) {
tests := []struct {
proto string
host string
want int
}{
{"HTTP/1.1", "", 400},
{"HTTP/1.1", "Host: \r\n", 200},
{"HTTP/1.1", "Host: 1.2.3.4\r\n", 200},
{"HTTP/1.1", "Host: foo.com\r\n", 200},
{"HTTP/1.1", "Host: foo-bar_baz.com\r\n", 200},
{"HTTP/1.1", "Host: foo.com:80\r\n", 200},
{"HTTP/1.1", "Host: ::1\r\n", 200},
{"HTTP/1.1", "Host: [::1]\r\n", 200}, // questionable without port, but accept it
{"HTTP/1.1", "Host: [::1]:80\r\n", 200},
{"HTTP/1.1", "Host: [::1%25en0]:80\r\n", 200},
{"HTTP/1.1", "Host: 1.2.3.4\r\n", 200},
{"HTTP/1.1", "Host: \x06\r\n", 400},
{"HTTP/1.1", "Host: \xff\r\n", 400},
{"HTTP/1.1", "Host: {\r\n", 400},
{"HTTP/1.1", "Host: }\r\n", 400},
{"HTTP/1.1", "Host: first\r\nHost: second\r\n", 400},
// HTTP/1.0 can lack a host header, but if present
// must play by the rules too:
{"HTTP/1.0", "", 200},
{"HTTP/1.0", "Host: first\r\nHost: second\r\n", 400},
{"HTTP/1.0", "Host: \xff\r\n", 400},
}
for _, tt := range tests {
conn := &testConn{closec: make(chan bool, 1)}
io.WriteString(&conn.readBuf, "GET / "+tt.proto+"\r\n"+tt.host+"\r\n")
ln := &oneConnListener{conn}
go Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
<-conn.closec
res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
if err != nil {
t.Errorf("For %s %q, ReadResponse: %v", tt.proto, tt.host, res)
continue
}
if res.StatusCode != tt.want {
t.Errorf("For %s %q, Status = %d; want %d", tt.proto, tt.host, res.StatusCode, tt.want)
}
}
}
// Test that we validate the valid bytes in HTTP/1 headers.
// Issue 11207.
func TestServerValidatesHeaders(t *testing.T) {
tests := []struct {
header string
want int
}{
{"", 200},
{"Foo: bar\r\n", 200},
{"X-Foo: bar\r\n", 200},
{"Foo: a space\r\n", 200},
{"A space: foo\r\n", 400}, // space in header
{"foo\xffbar: foo\r\n", 400}, // binary in header
{"foo\x00bar: foo\r\n", 400}, // binary in header
{"foo: foo foo\r\n", 200}, // LWS space is okay
{"foo: foo\tfoo\r\n", 200}, // LWS tab is okay
{"foo: foo\x00foo\r\n", 400}, // CTL 0x00 in value is bad
{"foo: foo\x7ffoo\r\n", 400}, // CTL 0x7f in value is bad
{"foo: foo\xfffoo\r\n", 200}, // non-ASCII high octets in value are fine
}
for _, tt := range tests {
conn := &testConn{closec: make(chan bool, 1)}
io.WriteString(&conn.readBuf, "GET / HTTP/1.1\r\nHost: foo\r\n"+tt.header+"\r\n")
ln := &oneConnListener{conn}
go Serve(ln, HandlerFunc(func(ResponseWriter, *Request) {}))
<-conn.closec
res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
if err != nil {
t.Errorf("For %q, ReadResponse: %v", tt.header, res)
continue
}
if res.StatusCode != tt.want {
t.Errorf("For %q, Status = %d; want %d", tt.header, res.StatusCode, tt.want)
}
}
}
func BenchmarkClientServer(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
fmt.Fprintf(rw, "Hello world.\n")
}))
defer ts.Close()
b.StartTimer()
for i := 0; i < b.N; i++ {
res, err := Get(ts.URL)
if err != nil {
b.Fatal("Get:", err)
}
all, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
b.Fatal("ReadAll:", err)
}
body := string(all)
if body != "Hello world.\n" {
b.Fatal("Got body:", body)
}
}
b.StopTimer()
}
func BenchmarkClientServerParallel4(b *testing.B) {
benchmarkClientServerParallel(b, 4, false)
}
func BenchmarkClientServerParallel64(b *testing.B) {
benchmarkClientServerParallel(b, 64, false)
}
func BenchmarkClientServerParallelTLS4(b *testing.B) {
benchmarkClientServerParallel(b, 4, true)
}
func BenchmarkClientServerParallelTLS64(b *testing.B) {
benchmarkClientServerParallel(b, 64, true)
}
func benchmarkClientServerParallel(b *testing.B, parallelism int, useTLS bool) {
b.ReportAllocs()
ts := httptest.NewUnstartedServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
fmt.Fprintf(rw, "Hello world.\n")
}))
if useTLS {
ts.StartTLS()
} else {
ts.Start()
}
defer ts.Close()
b.ResetTimer()
b.SetParallelism(parallelism)
b.RunParallel(func(pb *testing.PB) {
noVerifyTransport := &Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
}
defer noVerifyTransport.CloseIdleConnections()
client := &Client{Transport: noVerifyTransport}
for pb.Next() {
res, err := client.Get(ts.URL)
if err != nil {
b.Logf("Get: %v", err)
continue
}
all, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
b.Logf("ReadAll: %v", err)
continue
}
body := string(all)
if body != "Hello world.\n" {
panic("Got body: " + body)
}
}
})
}
// A benchmark for profiling the server without the HTTP client code.
// The client code runs in a subprocess.
//
// For use like:
// $ go test -c
// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof
// $ go tool pprof http.test http.prof
// (pprof) web
func BenchmarkServer(b *testing.B) {
b.ReportAllocs()
	// Child process mode.
if url := os.Getenv("TEST_BENCH_SERVER_URL"); url != "" {
n, err := strconv.Atoi(os.Getenv("TEST_BENCH_CLIENT_N"))
if err != nil {
panic(err)
}
for i := 0; i < n; i++ {
res, err := Get(url)
if err != nil {
log.Panicf("Get: %v", err)
}
all, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
log.Panicf("ReadAll: %v", err)
}
body := string(all)
if body != "Hello world.\n" {
log.Panicf("Got body: %q", body)
}
}
os.Exit(0)
return
}
var res = []byte("Hello world.\n")
b.StopTimer()
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.Write(res)
}))
defer ts.Close()
b.StartTimer()
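	// Re-exec this test binary as the HTTP client: the child process takes the
	// TEST_BENCH_SERVER_URL / TEST_BENCH_CLIENT_N branch at the top of this
	// function and issues b.N GETs against the server started above.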
cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer$")
cmd.Env = append([]string{
fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N),
fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL),
}, os.Environ()...)
out, err := cmd.CombinedOutput()
if err != nil {
b.Errorf("Test failure: %v, with output: %s", err, out)
}
}
// getNoBody wraps Get but closes any Response.Body before returning the response.
func getNoBody(urlStr string) (*Response, error) {
res, err := Get(urlStr)
if err != nil {
return nil, err
}
res.Body.Close()
return res, nil
}
// A benchmark for profiling the client without the HTTP server code.
// The server code runs in a subprocess.
func BenchmarkClient(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
defer afterTest(b)
port := os.Getenv("TEST_BENCH_SERVER_PORT") // can be set by user
if port == "" {
port = "39207"
}
var data = []byte("Hello world.\n")
if server := os.Getenv("TEST_BENCH_SERVER"); server != "" {
// Server process mode.
HandleFunc("/", func(w ResponseWriter, r *Request) {
r.ParseForm()
if r.Form.Get("stop") != "" {
os.Exit(0)
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Write(data)
})
log.Fatal(ListenAndServe("localhost:"+port, nil))
}
// Start server process.
cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
cmd.Env = append(os.Environ(), "TEST_BENCH_SERVER=yes")
if err := cmd.Start(); err != nil {
b.Fatalf("subprocess failed to start: %v", err)
}
defer cmd.Process.Kill()
done := make(chan error)
go func() {
done <- cmd.Wait()
}()
// Wait for the server process to respond.
url := "http://localhost:" + port + "/"
for i := 0; i < 100; i++ {
time.Sleep(50 * time.Millisecond)
if _, err := getNoBody(url); err == nil {
break
}
if i == 99 {
b.Fatalf("subprocess does not respond")
}
}
// Do b.N requests to the server.
b.StartTimer()
for i := 0; i < b.N; i++ {
res, err := Get(url)
if err != nil {
b.Fatalf("Get: %v", err)
}
body, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
b.Fatalf("ReadAll: %v", err)
}
		if !bytes.Equal(body, data) {
b.Fatalf("Got body: %q", body)
}
}
b.StopTimer()
// Instruct server process to stop.
getNoBody(url + "?stop=yes")
select {
case err := <-done:
if err != nil {
b.Fatalf("subprocess failed: %v", err)
}
case <-time.After(5 * time.Second):
b.Fatalf("subprocess did not stop")
}
}
func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.0
Host: golang.org
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17
Accept-Encoding: gzip,deflate,sdch
Accept-Language: en-US,en;q=0.8
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
`)
res := []byte("Hello world!\n")
conn := &testConn{
// testConn.Close will not push into the channel
// if it's full.
closec: make(chan bool, 1),
}
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.Write(res)
})
ln := new(oneConnListener)
for i := 0; i < b.N; i++ {
conn.readBuf.Reset()
conn.writeBuf.Reset()
conn.readBuf.Write(req)
ln.conn = conn
Serve(ln, handler)
<-conn.closec
}
}
// repeatReader reads content count times, then EOFs.
type repeatReader struct {
content []byte
count int
off int
}
func (r *repeatReader) Read(p []byte) (n int, err error) {
if r.count <= 0 {
return 0, io.EOF
}
n = copy(p, r.content[r.off:])
r.off += n
if r.off == len(r.content) {
r.count--
r.off = 0
}
return
}
func BenchmarkServerFakeConnWithKeepAlive(b *testing.B) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.1
Host: golang.org
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17
Accept-Encoding: gzip,deflate,sdch
Accept-Language: en-US,en;q=0.8
Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3
`)
res := []byte("Hello world!\n")
conn := &rwTestConn{
Reader: &repeatReader{content: req, count: b.N},
Writer: ioutil.Discard,
closec: make(chan bool, 1),
}
handled := 0
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
handled++
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.Write(res)
})
ln := &oneConnListener{conn: conn}
go Serve(ln, handler)
<-conn.closec
if b.N != handled {
b.Errorf("b.N=%d but handled %d", b.N, handled)
}
}
// same as above, but representing the most simple possible request
// and handler. Notably: the handler does not call rw.Header().
func BenchmarkServerFakeConnWithKeepAliveLite(b *testing.B) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
res := []byte("Hello world!\n")
conn := &rwTestConn{
Reader: &repeatReader{content: req, count: b.N},
Writer: ioutil.Discard,
closec: make(chan bool, 1),
}
handled := 0
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
handled++
rw.Write(res)
})
ln := &oneConnListener{conn: conn}
go Serve(ln, handler)
<-conn.closec
if b.N != handled {
b.Errorf("b.N=%d but handled %d", b.N, handled)
}
}
const someResponse = "<html>some response</html>"
// A response that is just under 2KB, the buffer-before-chunking threshold.
var response = bytes.Repeat([]byte(someResponse), 2<<10/len(someResponse))
// Both Content-Type and Content-Length set. Should be no buffering.
func BenchmarkServerHandlerTypeLen(b *testing.B) {
benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
}))
}
// A Content-Type is set, but no Content-Length. No sniffing is needed, but the Content-Length is still counted.
func BenchmarkServerHandlerNoLen(b *testing.B) {
benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Type", "text/html")
w.Write(response)
}))
}
// A Content-Length is set, but the Content-Type will be sniffed.
func BenchmarkServerHandlerNoType(b *testing.B) {
benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
}))
}
// Neither a Content-Type nor a Content-Length is set, so the body is sniffed and counted.
func BenchmarkServerHandlerNoHeader(b *testing.B) {
benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Write(response)
}))
}
func benchmarkHandler(b *testing.B, h Handler) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
conn := &rwTestConn{
Reader: &repeatReader{content: req, count: b.N},
Writer: ioutil.Discard,
closec: make(chan bool, 1),
}
handled := 0
handler := HandlerFunc(func(rw ResponseWriter, r *Request) {
handled++
h.ServeHTTP(rw, r)
})
ln := &oneConnListener{conn: conn}
go Serve(ln, handler)
<-conn.closec
if b.N != handled {
b.Errorf("b.N=%d but handled %d", b.N, handled)
}
}
func BenchmarkServerHijack(b *testing.B) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.1
Host: golang.org
`)
h := HandlerFunc(func(w ResponseWriter, r *Request) {
conn, _, err := w.(Hijacker).Hijack()
if err != nil {
panic(err)
}
conn.Close()
})
conn := &rwTestConn{
Writer: ioutil.Discard,
closec: make(chan bool, 1),
}
ln := &oneConnListener{conn: conn}
for i := 0; i < b.N; i++ {
conn.Reader = bytes.NewReader(req)
ln.conn = conn
Serve(ln, h)
<-conn.closec
}
}
func BenchmarkCloseNotifier(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
sawClose := make(chan bool)
ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
<-rw.(CloseNotifier).CloseNotify()
sawClose <- true
}))
defer ts.Close()
tot := time.NewTimer(5 * time.Second)
defer tot.Stop()
b.StartTimer()
for i := 0; i < b.N; i++ {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
b.Fatalf("error dialing: %v", err)
}
_, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n")
if err != nil {
b.Fatal(err)
}
conn.Close()
tot.Reset(5 * time.Second)
select {
case <-sawClose:
case <-tot.C:
b.Fatal("timeout")
}
}
b.StopTimer()
}
|
[
"\"TEST_BENCH_SERVER_URL\"",
"\"TEST_BENCH_CLIENT_N\"",
"\"TEST_BENCH_SERVER_PORT\"",
"\"TEST_BENCH_SERVER\""
] |
[] |
[
"TEST_BENCH_SERVER",
"TEST_BENCH_SERVER_URL",
"TEST_BENCH_SERVER_PORT",
"TEST_BENCH_CLIENT_N"
] |
[]
|
["TEST_BENCH_SERVER", "TEST_BENCH_SERVER_URL", "TEST_BENCH_SERVER_PORT", "TEST_BENCH_CLIENT_N"]
|
go
| 4 | 0 | |
examples/timing.py
|
import requests
import os
import numpy as np
import json
import sys
import time
import uproot
import numba
import hepaccelerate
import hepaccelerate.kernels as kernels
from hepaccelerate.utils import Results, Dataset, Histogram, choose_backend
from tests.kernel_test import load_dataset
USE_CUDA = int(os.environ.get("HEPACCELERATE_CUDA", 0)) == 1
nplib, backend = choose_backend(use_cuda=USE_CUDA)
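# nplib is the array module returned by choose_backend (NumPy on the CPU path;
# presumably its GPU counterpart when HEPACCELERATE_CUDA=1).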
def time_kernel(dataset, test_kernel):
# ensure it's compiled
test_kernel(dataset)
n = len(dataset)
t0 = time.time()
for i in range(5):
test_kernel(dataset)
t1 = time.time()
dt = (t1 - t0) / 5.0
speed = float(n) / dt
return speed
def test_kernel_sum_in_offsets(dataset):
muons = dataset.structs["Muon"][0]
sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
z = kernels.sum_in_offsets(
backend, muons.offsets, muons.pt, sel_ev, sel_mu, dtype=nplib.float32
)
def test_kernel_simple_cut(dataset):
muons = dataset.structs["Muon"][0]
sel_mu = muons.pt > 30.0
def test_kernel_max_in_offsets(dataset):
muons = dataset.structs["Muon"][0]
sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
z = kernels.max_in_offsets(backend, muons.offsets, muons.pt, sel_ev, sel_mu)
def test_kernel_get_in_offsets(dataset):
muons = dataset.structs["Muon"][0]
sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
inds = nplib.zeros(muons.numevents(), dtype=nplib.int8)
inds[:] = 0
z = kernels.get_in_offsets(backend, muons.offsets, muons.pt, inds, sel_ev, sel_mu)
def test_kernel_mask_deltar_first(dataset):
muons = dataset.structs["Muon"][0]
jet = dataset.structs["Jet"][0]
sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
sel_jet = jet.pt > 10
muons_matched_to_jet = kernels.mask_deltar_first(
backend,
{"offsets": muons.offsets, "eta": muons.eta, "phi": muons.phi},
sel_mu,
{"offsets": jet.offsets, "eta": jet.eta, "phi": jet.phi},
sel_jet,
0.3,
)
def test_kernel_histogram_from_vector(dataset):
muons = dataset.structs["Muon"][0]
weights = 2 * nplib.ones(muons.numobjects(), dtype=nplib.float32)
ret = kernels.histogram_from_vector(
backend, muons.pt, weights, nplib.linspace(0, 200, 100, dtype=nplib.float32)
)
def test_kernel_histogram_from_vector_several(dataset):
muons = dataset.structs["Muon"][0]
mask = nplib.ones(muons.numobjects(), dtype=nplib.bool)
mask[:100] = False
weights = 2 * nplib.ones(muons.numobjects(), dtype=nplib.float32)
variables = [
(muons.pt, nplib.linspace(0, 200, 100, dtype=nplib.float32)),
(muons.eta, nplib.linspace(-4, 4, 100, dtype=nplib.float32)),
(muons.phi, nplib.linspace(-4, 4, 100, dtype=nplib.float32)),
(muons.mass, nplib.linspace(0, 200, 100, dtype=nplib.float32)),
(muons.charge, nplib.array([-1, 0, 1, 2], dtype=nplib.float32)),
]
ret = kernels.histogram_from_vector_several(backend, variables, weights, mask)
def test_kernel_select_opposite_sign(dataset):
muons = dataset.structs["Muon"][0]
sel_ev = nplib.ones(muons.numevents(), dtype=nplib.bool)
sel_mu = nplib.ones(muons.numobjects(), dtype=nplib.bool)
muons_passing_os = kernels.select_opposite_sign(
backend, muons.offsets, muons.charge, sel_mu
)
def test_timing(ds):
with open("data/kernel_benchmarks.txt", "a") as of:
for i in range(5):
ret = run_timing(ds)
of.write(json.dumps(ret) + "\n")
def run_timing(ds):
print("Testing memory transfer speed")
t0 = time.time()
for i in range(5):
ds.move_to_device(nplib)
t1 = time.time()
dt = (t1 - t0) / 5.0
ret = {
"use_cuda": USE_CUDA,
"num_threads": numba.config.NUMBA_NUM_THREADS,
"use_avx": numba.config.ENABLE_AVX,
"num_events": ds.numevents(),
"memsize": ds.memsize(),
}
print(
"Memory transfer speed: {0:.2f} MHz, event size {1:.2f} bytes, data transfer speed {2:.2f} MB/s".format(
ds.numevents() / dt / 1000.0 / 1000.0,
ds.eventsize(),
ds.memsize() / dt / 1000 / 1000,
)
)
ret["memory_transfer"] = ds.numevents() / dt / 1000.0 / 1000.0
t = time_kernel(ds, test_kernel_sum_in_offsets)
print("sum_in_offsets {0:.2f} MHz".format(t / 1000 / 1000))
ret["sum_in_offsets"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_simple_cut)
print("simple_cut {0:.2f} MHz".format(t / 1000 / 1000))
ret["simple_cut"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_max_in_offsets)
print("max_in_offsets {0:.2f} MHz".format(t / 1000 / 1000))
ret["max_in_offsets"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_get_in_offsets)
print("get_in_offsets {0:.2f} MHz".format(t / 1000 / 1000))
ret["get_in_offsets"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_mask_deltar_first)
print("mask_deltar_first {0:.2f} MHz".format(t / 1000 / 1000))
ret["mask_deltar_first"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_select_opposite_sign)
print("select_muons_opposite_sign {0:.2f} MHz".format(t / 1000 / 1000))
ret["select_muons_opposite_sign"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_histogram_from_vector)
print("histogram_from_vector {0:.2f} MHz".format(t / 1000 / 1000))
ret["histogram_from_vector"] = t / 1000 / 1000
t = time_kernel(ds, test_kernel_histogram_from_vector_several)
print("histogram_from_vector_several {0:.2f} MHz".format(t / 1000 / 1000))
ret["histogram_from_vector_several"] = t / 1000 / 1000
return ret
if __name__ == "__main__":
dataset = load_dataset(nplib, 5)
test_timing(dataset)
|
[] |
[] |
[
"HEPACCELERATE_CUDA"
] |
[]
|
["HEPACCELERATE_CUDA"]
|
python
| 1 | 0 | |
viper/sp.py
|
"""
Start project (sp)
"""
import os
import platform
import click
import subprocess
SHELL_OPTIONS = ['tcsh', 'sh', 'bash']
# ROOT_DEFAULT = "/prj"
def start_project(project, dev=False, name=None, prefix=False, shell=None, init=None, skill=None):
"""
Start project ("sp" CLI command)
:param project: The name of the project to open
:param dev: A flag for toggling the development mode
:param name: Conda environment name to activate.
prefix and name are mutually exclusive options.
:param prefix: Conda prefix to activate.
prefix and name are mutually exclusive options.
:param shell: The type of shell script specified with the --init or -i
options
:param init: Paths to one or more shell initialization scripts which will
be sourced, each delimited by a ":",
this option can also be specified multiple times to add additional scripts.
:param skill: Paths to one or more SKILL initialization scripts which will be loaded using loadi,
each delimited by a \":\", this option can also be specified multiple times to add
additional scripts.
:return:
"""
# Set dev env variable when dev is set
if dev:
os.environ["VIPER_DEV"] = "TRUE"
# Parse shell and skill script initialization paths
def process_file_paths(scripts):
        if isinstance(scripts, str):
            scripts = [scripts]  # wrap a single path string instead of iterating it character by character
scripts_exist = []
if scripts is not None:
for script_top in scripts:
for script_bottom in script_top.split(":"):
if os.path.isfile(script_bottom):
scripts_exist.append(script_bottom)
if len(scripts_exist) > 0:
scripts_out = ":".join(scripts_exist)
else:
scripts_out = None
return scripts_out
init = process_file_paths(init)
if init is None:
init = "None"
skill = process_file_paths(skill)
if skill is not None:
os.environ["VIPER_SP_SKILL_INIT"] = skill
if shell is None:
shell = default_shell()
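    # Map each supported shell to its sp_* launcher script (assumption: these
    # wrapper executables are provided on PATH by the viper installation).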
commands = {
"tcsh": "sp_tcsh",
"sh": "sp_sh",
"bash": "sp_bash",
}
# Run command
subprocess.run([commands[shell], str(project),
str(name), str(prefix), init],
env=os.environ)
def default_shell():
"""selects the default shell for sp"""
if platform.system() == "Linux":
login_shell = os.path.basename(os.environ["SHELL"])
if login_shell in SHELL_OPTIONS:
default = login_shell
elif os.environ["VIPER_SP_SHELL_DEFAULT"] is not None:
default = os.environ["VIPER_SP_SHELL_DEFAULT"]
else:
default = "tcsh"
elif platform.system() == "Windows":
default = "cmd"
else:
raise RuntimeError("Unsupported platform: %s", platform.system())
return default
# Command Line Interface
@click.command()
@click.option("--dev/--nodev", "-d/-o", default=False, is_flag=True,
help="A flag for toggling the development mode")
@click.option("--name", "-n", default=None,
help="Conda name to activate. prefix and name are mutually exclusive options.")
@click.option("--prefix", "-p", default=None,
help="Conda Prefix to activate. prefix and name are mutually exclusive options.")
@click.option("--shell", "-s", default=default_shell(), type=click.Choice(SHELL_OPTIONS, case_sensitive=False),
help="The type of shell script specified with the --init or -i options")
@click.option("--init", "-i", default=None, type=str, multiple=True,
help="Paths to one or more shell initialization scripts which will be sourced, each delimited by a \":\","
" this option can also be specified multiple times to add additional scripts.")
@click.option("--skill", "-k", "-replay", default=None, type=str, multiple=True,
help="Paths to one or more SKILL initialization scripts which will be loaded using loadi, "
"each delimited by a \":\", this option can also be specified multiple times to add "
"additional scripts.")
@click.version_option()
@click.argument("project", type=str)
def sp(project, dev, name, prefix, shell, init, skill):
"""
Start Project
sp [options] project
starts the given Cadence Virtuoso project
"""
start_project(project, dev, name, prefix, shell, init, skill)
if __name__ == '__main__':
sp(auto_envvar_prefix='VIPER')
|
[] |
[] |
[
"SHELL",
"VIPER_SP_SHELL_DEFAULT",
"VIPER_SP_SKILL_INIT",
"VIPER_DEV"
] |
[]
|
["SHELL", "VIPER_SP_SHELL_DEFAULT", "VIPER_SP_SKILL_INIT", "VIPER_DEV"]
|
python
| 4 | 0 | |
biobb_adapters/pycompss/biobb_amber/cphstats/cestats_run.py
|
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_amber.cphstats.cestats_run import CestatsRun # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
@task(input_cein_path=FILE_IN, input_ceout_path=FILE_IN, output_dat_path=FILE_OUT, output_population_path=FILE_OUT, output_chunk_path=FILE_OUT, output_cumulative_path=FILE_OUT, output_conditional_path=FILE_OUT, output_chunk_conditional_path=FILE_OUT,
on_failure="IGNORE", time_out=task_time_out)
def _cestatsrun(input_cein_path, input_ceout_path, output_dat_path, output_population_path, output_chunk_path, output_cumulative_path, output_conditional_path, output_chunk_conditional_path, properties, **kwargs):
task_config.pop_pmi(os.environ)
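    # pop_pmi presumably removes PMI/MPI launcher variables from the inherited
    # environment before the wrapped building block runs (assumption based on
    # the helper's name in biobb_adapters.pycompss.biobb_commons.task_config).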
try:
CestatsRun(input_cein_path=input_cein_path, input_ceout_path=input_ceout_path, output_dat_path=output_dat_path, output_population_path=output_population_path, output_chunk_path=output_chunk_path, output_cumulative_path=output_cumulative_path, output_conditional_path=output_conditional_path, output_chunk_conditional_path=output_chunk_conditional_path, properties=properties, **kwargs).launch()
except Exception as e:
traceback.print_exc()
raise e
finally:
sys.stdout.flush()
sys.stderr.flush()
def cestats_run(input_cein_path, input_ceout_path, output_dat_path, output_population_path=None, output_chunk_path=None, output_cumulative_path=None, output_conditional_path=None, output_chunk_conditional_path=None, properties=None, **kwargs):
if (output_dat_path is None or os.path.exists(output_dat_path)) and \
(output_population_path is None or os.path.exists(output_population_path)) and \
(output_chunk_path is None or os.path.exists(output_chunk_path)) and \
(output_cumulative_path is None or os.path.exists(output_cumulative_path)) and \
(output_conditional_path is None or os.path.exists(output_conditional_path)) and \
(output_chunk_conditional_path is None or os.path.exists(output_chunk_conditional_path)) and \
True:
print("WARN: Task CestatsRun already executed.")
else:
_cestatsrun( input_cein_path, input_ceout_path, output_dat_path, output_population_path, output_chunk_path, output_cumulative_path, output_conditional_path, output_chunk_conditional_path, properties, **kwargs)
|
[] |
[] |
[
"TASK_TIME_OUT"
] |
[]
|
["TASK_TIME_OUT"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reinhardt.settings")
print (os.environ["DJANGO_SETTINGS_MODULE"])
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
mypoligonapi/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mypoligonapi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/client/config.go
|
/*
Copyright 2018 the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"encoding/json"
"os"
"path/filepath"
"github.com/pkg/errors"
)
const (
ConfigKeyNamespace = "namespace"
)
// LoadConfig loads the Velero client configuration file and returns it as a map[string]string. If the
// file does not exist, an empty map is returned.
func LoadConfig() (map[string]string, error) {
fileName := configFileName()
_, err := os.Stat(fileName)
if os.IsNotExist(err) {
// If the file isn't there, just return an empty map
return map[string]string{}, nil
}
if err != nil {
// For any other Stat() error, return it
return nil, errors.WithStack(err)
}
configFile, err := os.Open(fileName)
if err != nil {
return nil, errors.WithStack(err)
}
defer configFile.Close()
var config map[string]string
if err := json.NewDecoder(configFile).Decode(&config); err != nil {
return nil, errors.WithStack(err)
}
return config, nil
}
// SaveConfig saves the passed in config map to the Velero client configuration file.
func SaveConfig(config map[string]string) error {
fileName := configFileName()
// Try to make the directory in case it doesn't exist
dir := filepath.Dir(fileName)
if err := os.MkdirAll(dir, 0755); err != nil {
return errors.WithStack(err)
}
configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return errors.WithStack(err)
}
defer configFile.Close()
return json.NewEncoder(configFile).Encode(&config)
}
func configFileName() string {
return filepath.Join(os.Getenv("HOME"), ".config", "velero", "config.json")
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
app2.py
|
'''COPYRIGHT (c) 2020,2021 DEV MASRANI
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
'''
"""
Prerequisites
pip3 install spotipy Flask Flask-Session
// from your [app settings](https://developer.spotify.com/dashboard/applications)
export SPOTIPY_CLIENT_ID=client_id_here
export SPOTIPY_CLIENT_SECRET=client_secret_here
export SPOTIPY_REDIRECT_URI='http://127.0.0.1:8080' // must contain a port
// SPOTIPY_REDIRECT_URI must be added to your [app settings](https://developer.spotify.com/dashboard/applications)
OPTIONAL
// in development environment for debug output
export FLASK_ENV=development
// so that you can invoke the app outside of the file's directory include
export FLASK_APP=/path/to/spotipy/examples/app.py
// on Windows, use `SET` instead of `export`
Run app.py
python3 -m flask run --port=8080
NOTE: If receiving "port already in use" error, try other ports: 5000, 8090, 8888, etc...
(will need to be updated in your Spotify app and SPOTIPY_REDIRECT_URI variable)
"""
import os
from flask import Flask, session, request, redirect, render_template
from flask_session import Session
import spotipy
import uuid
app = Flask(__name__)
app.config['SECRET_KEY'] = os.urandom(64)
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_FILE_DIR'] = './.flask_session/'
Session(app)
os.environ['SPOTIPY_CLIENT_ID'] = '' #Secrets found in the secrets.py folder
os.environ['SPOTIPY_CLIENT_SECRET'] = ''
os.environ['SPOTIPY_REDIRECT_URI'] = 'http://127.0.0.1/login'
caches_folder = './.spotify_caches/' #Cache path for clearing session
if not os.path.exists(caches_folder):
os.makedirs(caches_folder)
def session_cache_path():
return caches_folder + session.get('uuid') #Gets path
@app.route('/')
def main():
return render_template('home.html') #initial path
@app.route('/options')
def optionselect():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path()) #gets token for OAuth
if not auth_manager.get_cached_token():
return redirect('/') #if no token, redirect back home
return render_template('options.html') #render options.html
@app.route('/result',methods=['POST', 'GET'])
def result():
    fail = 0  # counts failed form fields to prevent empty playlist creation when the user hastily presses go
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
if not auth_manager.get_cached_token():
return redirect('/') #if no login token, redirect back to root
try:
playlist_name = request.form['playlist_name'] #attempts to get playlist name from form
except:
playlist_name = "Your Top Songs" #default fallback playlist name
fail += 1
try:
playlist_description = request.form['playlist_description'] #attempts to pull playlist description
playlist_description = playlist_description + ' | Generated with love by https://Playlistify-dev.herokuapp.com'
except:
playlist_description = 'Generated with love by Playlistify'
fail += 1
try:
numsongs = int(request.form['number_of_songs']) #attempts to get number of songs
if (numsongs > 100 or numsongs < 1):
return render_template('options.html', error_message_artists='Number of songs too low or high!')
#if greater than allowed num or less than 0, give error
except:
fail += 1
return render_template('options.html', error_message_artists='Make sure to enter a valid number!')
#if no num, throw error
option = int(request.form['option']) #get option from form
if (option == -1):
fail += 1
return render_template('options.html', error_message_artists='Please select which time period you want!') #error message
    if (fail < 4):  # only create the playlist if at least one form field was usable
        generatePlaylist(playlist_name, playlist_description)  # avoids creating an empty playlist
if(option == 3):
if(numsongs < 3): #if selected option 3(All of the above), has to be 3 minimum
numsongs = 3
addSongs(getSongIDs(number=numsongs))
else:
getTopSongsinPeriod(option,numsongs) #gets top songs and gives IDs for them and adds them
print(playlist_name) #telemetry to see what people are making
print(playlist_description)
print(numsongs)
print(option)
i_frame_url = "https://open.spotify.com/embed/playlist/" + str(getPlaylistID())
session.clear()
#return request.form['option']
return render_template('result.html', thing_one='Done!', thing_two='This playlist has been made in your Spotify Account!', i_frame_url=i_frame_url)
#return 'done'
@app.route('/login')
def index():
if not session.get('uuid'):
# Step 1. Visitor is unknown, give random ID
session['uuid'] = str(uuid.uuid4())
auth_manager = spotipy.oauth2.SpotifyOAuth(
scope='user-read-currently-playing playlist-modify-private user-top-read playlist-modify-public',
cache_path=session_cache_path(),
show_dialog=True)
if request.args.get("code"):
# Step 3. Being redirected from Spotify auth page
auth_manager.get_access_token(request.args.get("code"))
return redirect('/options')
if not auth_manager.get_cached_token():
# Step 2. Display sign in link when no token
auth_url = auth_manager.get_authorize_url()
return redirect(auth_url)
return redirect('/options')
@app.route('/sign_out')
def sign_out():
try:
# Remove the CACHE file (.cache-test) so that a new user can authorize.
os.remove(session_cache_path())
session.clear()
except TypeError:
pass
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
@app.route('/playlists')
def playlists():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
if not auth_manager.get_cached_token():
return redirect('/login')
spotify = spotipy.Spotify(auth_manager=auth_manager)
return spotify.current_user_playlists()
@app.route('/currently_playing')
def currently_playing():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
if not auth_manager.get_cached_token():
return redirect('/login')
spotify = spotipy.Spotify(auth_manager=auth_manager)
track = spotify.current_user_playing_track()
    if track is not None:
return track
return "No track currently playing."
@app.route('/current_user')
def current_user():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
if not auth_manager.get_cached_token():
return redirect('/login')
spotify = spotipy.Spotify(auth_manager=auth_manager)
return spotify.current_user()
def generatePlaylist(name,description):
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
sp1 = spotipy.Spotify(auth_manager=auth_manager)
sp1.user_playlist_create(user=get_user_id(), name=name, description=description)
def get_user_id():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
sp1 = spotipy.Spotify(auth_manager=auth_manager)
return str(sp1.me()['id'])
def addSongs(item):
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
sp1 = spotipy.Spotify(auth_manager=auth_manager)
sp1.playlist_add_items(playlist_id=getPlaylistID(),items=item)
def getPlaylistID():
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
sp1 = spotipy.Spotify(auth_manager=auth_manager)
user_playlist = sp1.user_playlists(user=get_user_id(),limit=1,offset=0)
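    # limit=1, offset=0 returns the most recent playlist in the user's listing,
    # assumed here to be the one generatePlaylist() just created.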
# for item in user_playlist:
# print(item)
playlist_Data = user_playlist['items'][0]
playlist_ID = playlist_Data['id']
return playlist_ID
def getSongIDs(number):
songIDs = []
number = int(number//3)
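    # Split the requested count evenly across the three Spotify time ranges
    # (short_term, medium_term, long_term) queried by getTopSongs below.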
for i in range(3):
templist = getTopSongs(i,number)
for song in templist['items']:
id = song['id']
songIDs.append(id)
return songIDs
def getTopSongs(index, limit):
length = ['short_term', 'medium_term', 'long_term']
auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
sp1 = spotipy.Spotify(auth_manager=auth_manager)
topsongs = sp1.current_user_top_tracks(time_range=length[index], limit=limit)
return topsongs
def getTopSongsinPeriod(option,numsongs):
songIDs = []
templist = getTopSongs(option, numsongs)
for song in templist['items']:
id = song['id']
songIDs.append(id)
addSongs(songIDs)
'''
Following lines allow application to be run more conveniently with
`python app.py` (Make sure you're using python3)
(Also includes directive to leverage pythons threading capacity.)
'''
if __name__ == '__main__':
app.run(threaded=True, port=int(os.environ.get("PORT", 8080)))
|
[] |
[] |
[
"PORT",
"SPOTIPY_CLIENT_SECRET",
"SPOTIPY_CLIENT_ID",
"SPOTIPY_REDIRECT_URI"
] |
[]
|
["PORT", "SPOTIPY_CLIENT_SECRET", "SPOTIPY_CLIENT_ID", "SPOTIPY_REDIRECT_URI"]
|
python
| 4 | 0 | |
controllers/finalizers/suit_test.go
|
// Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package finalizers
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"go.uber.org/fx"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/cmd/chaos-controller-manager/provider"
"github.com/chaos-mesh/chaos-mesh/controllers/schedule/utils"
"github.com/chaos-mesh/chaos-mesh/controllers/types"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/test"
"k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var app *fx.App
var k8sClient client.Client
var lister *utils.ActiveLister
var config *rest.Config
var testEnv *envtest.Environment
var setupLog = ctrl.Log.WithName("setup")
func TestSchedule(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Schedule suit",
[]Reporter{envtest.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
By("bootstrapping test environment")
t := true
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
}
err := v1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
config, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(config).ToNot(BeNil())
k8sClient, err = client.New(config, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
app = fx.New(
fx.Options(
fx.Provide(
provider.NewOption,
provider.NewClient,
provider.NewReader,
provider.NewLogger,
provider.NewAuthCli,
provider.NewScheme,
test.NewTestManager,
fx.Annotated{
Group: "controller",
Target: NewController,
},
),
fx.Supply(config),
types.ChaosObjects,
),
fx.Invoke(Run),
)
startCtx, cancel := context.WithTimeout(context.Background(), app.StartTimeout())
defer cancel()
if err := app.Start(startCtx); err != nil {
setupLog.Error(err, "fail to start manager")
}
Expect(err).ToNot(HaveOccurred())
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
stopCtx, cancel := context.WithTimeout(context.Background(), app.StopTimeout())
defer cancel()
if err := app.Stop(stopCtx); err != nil {
setupLog.Error(err, "fail to stop manager")
}
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
type RunParams struct {
fx.In
Mgr ctrl.Manager
Logger logr.Logger
Controllers []types.Controller `group:"controller"`
Objs []types.Object `group:"objs"`
}
func Run(params RunParams) error {
lister = utils.NewActiveLister(k8sClient, params.Logger)
return nil
}
|
[
"\"TEST_USE_EXISTING_CLUSTER\""
] |
[] |
[
"TEST_USE_EXISTING_CLUSTER"
] |
[]
|
["TEST_USE_EXISTING_CLUSTER"]
|
go
| 1 | 0 | |
libpod/image/pull.go
|
package image
import (
"context"
"fmt"
"io"
"os"
"strings"
cp "github.com/containers/image/copy"
"github.com/containers/image/directory"
"github.com/containers/image/docker"
dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/docker/reference"
"github.com/containers/image/docker/tarfile"
ociarchive "github.com/containers/image/oci/archive"
"github.com/containers/image/pkg/sysregistries"
is "github.com/containers/image/storage"
"github.com/containers/image/tarball"
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/pkg/errors"
"github.com/projectatomic/libpod/pkg/registries"
"github.com/projectatomic/libpod/pkg/util"
"github.com/sirupsen/logrus"
)
var (
// DockerArchive is the transport we prepend to an image name
// when saving to docker-archive
DockerArchive = dockerarchive.Transport.Name()
// OCIArchive is the transport we prepend to an image name
// when saving to oci-archive
OCIArchive = ociarchive.Transport.Name()
// DirTransport is the transport for pushing and pulling
// images to and from a directory
DirTransport = directory.Transport.Name()
// TransportNames are the supported transports in string form
TransportNames = [...]string{DefaultTransport, DockerArchive, OCIArchive, "ostree:", "dir:"}
// TarballTransport is the transport for importing a tar archive
// and creating a filesystem image
TarballTransport = tarball.Transport.Name()
// DockerTransport is the transport for docker registries
DockerTransport = docker.Transport.Name() + "://"
// AtomicTransport is the transport for atomic registries
AtomicTransport = "atomic"
// DefaultTransport is a prefix that we apply to an image name
DefaultTransport = DockerTransport
)
type pullStruct struct {
image string
srcRef types.ImageReference
dstRef types.ImageReference
}
func (ir *Runtime) getPullStruct(srcRef types.ImageReference, destName string) (*pullStruct, error) {
reference := destName
if srcRef.DockerReference() != nil {
reference = srcRef.DockerReference().String()
}
destRef, err := is.Transport.ParseStoreReference(ir.store, reference)
if err != nil {
return nil, errors.Wrapf(err, "error parsing dest reference name")
}
return &pullStruct{
image: destName,
srcRef: srcRef,
dstRef: destRef,
}, nil
}
// returns a list of pullStruct with the srcRef and DstRef based on the transport being used
func (ir *Runtime) getPullListFromRef(ctx context.Context, srcRef types.ImageReference, imgName string, sc *types.SystemContext) ([]*pullStruct, error) {
var pullStructs []*pullStruct
splitArr := strings.Split(imgName, ":")
archFile := splitArr[len(splitArr)-1]
// supports pulling from docker-archive, oci, and registries
if srcRef.Transport().Name() == DockerArchive {
tarSource, err := tarfile.NewSourceFromFile(archFile)
if err != nil {
return nil, err
}
manifest, err := tarSource.LoadTarManifest()
if err != nil {
return nil, errors.Wrapf(err, "error retrieving manifest.json")
}
// to pull the first image stored in the tar file
if len(manifest) == 0 {
// use the hex of the digest if no manifest is found
reference, err := getImageDigest(ctx, srcRef, sc)
if err != nil {
return nil, err
}
pullInfo, err := ir.getPullStruct(srcRef, reference)
if err != nil {
return nil, err
}
pullStructs = append(pullStructs, pullInfo)
} else {
var dest string
if len(manifest[0].RepoTags) > 0 {
dest = manifest[0].RepoTags[0]
} else {
// If the input image has no repotags, we need to feed it a dest anyways
dest, err = getImageDigest(ctx, srcRef, sc)
if err != nil {
return nil, err
}
}
pullInfo, err := ir.getPullStruct(srcRef, dest)
if err != nil {
return nil, err
}
pullStructs = append(pullStructs, pullInfo)
}
} else if srcRef.Transport().Name() == OCIArchive {
// retrieve the manifest from index.json to access the image name
manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
if err != nil {
return nil, errors.Wrapf(err, "error loading manifest for %q", srcRef)
}
var dest string
if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
// If the input image has no image.ref.name, we need to feed it a dest anyways
// use the hex of the digest
dest, err = getImageDigest(ctx, srcRef, sc)
if err != nil {
return nil, errors.Wrapf(err, "error getting image digest; image reference not found")
}
} else {
dest = manifest.Annotations["org.opencontainers.image.ref.name"]
}
pullInfo, err := ir.getPullStruct(srcRef, dest)
if err != nil {
return nil, err
}
pullStructs = append(pullStructs, pullInfo)
} else if srcRef.Transport().Name() == DirTransport {
// supports pull from a directory
image := splitArr[1]
// remove leading "/"
if image[:1] == "/" {
image = image[1:]
}
pullInfo, err := ir.getPullStruct(srcRef, image)
if err != nil {
return nil, err
}
pullStructs = append(pullStructs, pullInfo)
} else {
pullInfo, err := ir.getPullStruct(srcRef, imgName)
if err != nil {
return nil, err
}
pullStructs = append(pullStructs, pullInfo)
}
return pullStructs, nil
}
// pullImage pulls an image from configured registries
// By default, only the latest tag (or a specific tag if requested) will be
// pulled.
func (i *Image) pullImage(ctx context.Context, writer io.Writer, authfile, signaturePolicyPath string, signingOptions SigningOptions, dockerOptions *DockerRegistryOptions, forceSecure bool) (string, error) {
// pullImage copies the image from the source to the destination
var pullStructs []*pullStruct
sc := GetSystemContext(signaturePolicyPath, authfile, false)
srcRef, err := alltransports.ParseImageName(i.InputName)
if err != nil {
// could be trying to pull from registry with short name
pullStructs, err = i.createNamesToPull()
if err != nil {
return "", errors.Wrap(err, "error getting default registries to try")
}
} else {
pullStructs, err = i.imageruntime.getPullListFromRef(ctx, srcRef, i.InputName, sc)
if err != nil {
return "", errors.Wrapf(err, "error getting pullStruct info to pull image %q", i.InputName)
}
}
policyContext, err := getPolicyContext(sc)
if err != nil {
return "", err
}
defer policyContext.Destroy()
insecureRegistries, err := registries.GetInsecureRegistries()
if err != nil {
return "", err
}
for _, imageInfo := range pullStructs {
copyOptions := getCopyOptions(writer, signaturePolicyPath, dockerOptions, nil, signingOptions, authfile, "", false)
if strings.HasPrefix(DockerTransport, imageInfo.srcRef.Transport().Name()) {
imgRef, err := reference.Parse(imageInfo.srcRef.DockerReference().String())
if err != nil {
return "", err
}
registry := reference.Domain(imgRef.(reference.Named))
if util.StringInSlice(registry, insecureRegistries) && !forceSecure {
copyOptions.SourceCtx.DockerInsecureSkipTLSVerify = true
logrus.Info(fmt.Sprintf("%s is an insecure registry; pulling with tls-verify=false", registry))
}
}
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (strings.HasPrefix(DockerTransport, imageInfo.srcRef.Transport().Name()) || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image))
}
if err = cp.Image(ctx, policyContext, imageInfo.dstRef, imageInfo.srcRef, copyOptions); err != nil {
if writer != nil {
io.WriteString(writer, "Failed\n")
}
} else {
return imageInfo.image, nil
}
}
return "", errors.Wrapf(err, "error pulling image from")
}
// createNamesToPull looks at a decomposed image and determines the possible
// images names to try pulling in combination with the registries.conf file as well
func (i *Image) createNamesToPull() ([]*pullStruct, error) {
var pullNames []*pullStruct
decomposedImage, err := decompose(i.InputName)
if err != nil {
return nil, err
}
if decomposedImage.hasRegistry {
srcRef, err := alltransports.ParseImageName(decomposedImage.assembleWithTransport())
if err != nil {
return nil, errors.Wrapf(err, "unable to parse '%s'", i.InputName)
}
ps := pullStruct{
image: i.InputName,
srcRef: srcRef,
}
pullNames = append(pullNames, &ps)
} else {
registryConfigPath := ""
envOverride := os.Getenv("REGISTRIES_CONFIG_PATH")
if len(envOverride) > 0 {
registryConfigPath = envOverride
}
searchRegistries, err := sysregistries.GetRegistries(&types.SystemContext{SystemRegistriesConfPath: registryConfigPath})
if err != nil {
return nil, err
}
for _, registry := range searchRegistries {
decomposedImage.registry = registry
srcRef, err := alltransports.ParseImageName(decomposedImage.assembleWithTransport())
if err != nil {
return nil, errors.Wrapf(err, "unable to parse '%s'", i.InputName)
}
ps := pullStruct{
image: decomposedImage.assemble(),
srcRef: srcRef,
}
pullNames = append(pullNames, &ps)
}
}
for _, pStruct := range pullNames {
destRef, err := is.Transport.ParseStoreReference(i.imageruntime.store, pStruct.image)
if err != nil {
return nil, errors.Wrapf(err, "error parsing dest reference name")
}
pStruct.dstRef = destRef
}
return pullNames, nil
}
|
[
"\"REGISTRIES_CONFIG_PATH\""
] |
[] |
[
"REGISTRIES_CONFIG_PATH"
] |
[]
|
["REGISTRIES_CONFIG_PATH"]
|
go
| 1 | 0 | |
src/github.com/docker/docker/daemon/info.go
|
package daemon
import (
"os"
"runtime"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
"github.com/docker/docker/utils"
)
func (daemon *Daemon) SystemInfo() (*types.Info, error) {
images := daemon.Graph().Map()
var imgcount int
if images == nil {
imgcount = 0
} else {
imgcount = len(images)
}
kernelVersion := "<unknown>"
if kv, err := kernel.GetKernelVersion(); err == nil {
kernelVersion = kv.String()
}
operatingSystem := "<unknown>"
if s, err := operatingsystem.GetOperatingSystem(); err == nil {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
meminfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
}
// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
initPath := utils.DockerInitPath("")
if initPath == "" {
// if that fails, we'll just return the path from the daemon
initPath = daemon.SystemInitPath()
}
v := &types.Info{
ID: daemon.ID,
Containers: len(daemon.List()),
Images: imgcount,
Driver: daemon.GraphDriver().String(),
DriverStatus: daemon.GraphDriver().Status(),
IPv4Forwarding: !daemon.SystemConfig().IPv4ForwardingDisabled,
BridgeNfIptables: !daemon.SystemConfig().BridgeNfCallIptablesDisabled,
BridgeNfIp6tables: !daemon.SystemConfig().BridgeNfCallIp6tablesDisabled,
Debug: os.Getenv("DEBUG") != "",
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
ExecutionDriver: daemon.ExecutionDriver().Name(),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion,
OperatingSystem: operatingSystem,
IndexServerAddress: registry.IndexServer,
RegistryConfig: daemon.RegistryService.Config,
InitSha1: dockerversion.INITSHA1,
InitPath: initPath,
NCPU: runtime.NumCPU(),
MemTotal: meminfo.MemTotal,
DockerRootDir: daemon.Config().Root,
Labels: daemon.Config().Labels,
ExperimentalBuild: utils.ExperimentalBuild(),
}
// TODO Windows. Refactor this more once sysinfo is refactored into
// platform specific code. On Windows, sysinfo.cgroupMemInfo and
// sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if
// an attempt is made to access through them.
if runtime.GOOS != "windows" {
v.MemoryLimit = daemon.SystemConfig().MemoryLimit
v.SwapLimit = daemon.SystemConfig().SwapLimit
v.OomKillDisable = daemon.SystemConfig().OomKillDisable
v.CpuCfsPeriod = daemon.SystemConfig().CpuCfsPeriod
v.CpuCfsQuota = daemon.SystemConfig().CpuCfsQuota
}
if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
v.HttpProxy = httpProxy
}
if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" {
v.HttpsProxy = httpsProxy
}
if noProxy := os.Getenv("no_proxy"); noProxy != "" {
v.NoProxy = noProxy
}
if hostname, err := os.Hostname(); err == nil {
v.Name = hostname
}
return v, nil
}
|
[
"\"DEBUG\"",
"\"http_proxy\"",
"\"https_proxy\"",
"\"no_proxy\""
] |
[] |
[
"https_proxy",
"http_proxy",
"no_proxy",
"DEBUG"
] |
[]
|
["https_proxy", "http_proxy", "no_proxy", "DEBUG"]
|
go
| 4 | 0 | |
src/cmd/compile/internal/ssa/rewrite.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import (
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
"cmd/internal/objabi"
"cmd/internal/src"
"encoding/binary"
"fmt"
"io"
"math"
"math/bits"
"os"
"path/filepath"
)
func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
// repeat rewrites until we find no more rewrites
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
pendingLines.clear()
debug := f.pass.debug
if debug > 1 {
fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name)
}
for {
change := false
for _, b := range f.Blocks {
var b0 *Block
if debug > 1 {
b0 = new(Block)
*b0 = *b
b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing
}
for i, c := range b.ControlValues() {
for c.Op == OpCopy {
c = c.Args[0]
b.ReplaceControl(i, c)
}
}
if rb(b) {
change = true
if debug > 1 {
fmt.Printf("rewriting %s -> %s\n", b0.LongString(), b.LongString())
}
}
for j, v := range b.Values {
var v0 *Value
if debug > 1 {
v0 = new(Value)
*v0 = *v
v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing
}
vchange := phielimValue(v)
if vchange && debug > 1 {
fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
}
// Eliminate copy inputs.
// If any copy input becomes unused, mark it
// as invalid and discard its argument. Repeat
// recursively on the discarded argument.
// This phase helps remove phantom "dead copy" uses
// of a value so that a x.Uses==1 rule condition
// fires reliably.
for i, a := range v.Args {
if a.Op != OpCopy {
continue
}
aa := copySource(a)
v.SetArg(i, aa)
// If a, a copy, has a line boundary indicator, attempt to find a new value
// to hold it. The first candidate is the value that will replace a (aa),
// if it shares the same block and line and is eligible.
// The second option is v, which has a as an input. Because aa is earlier in
// the data flow, it is the better choice.
if a.Pos.IsStmt() == src.PosIsStmt {
if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt {
aa.Pos = aa.Pos.WithIsStmt()
} else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt {
v.Pos = v.Pos.WithIsStmt()
} else {
// Record the lost line and look for a new home after all rewrites are complete.
// TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same
// line to appear in more than one block, but only one block is stored, so if both end
// up here, then one will be lost.
pendingLines.set(a.Pos, int32(a.Block.ID))
}
a.Pos = a.Pos.WithNotStmt()
}
vchange = true
for a.Uses == 0 {
b := a.Args[0]
a.reset(OpInvalid)
a = b
}
}
if vchange && debug > 1 {
fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
}
// apply rewrite function
if rv(v) {
vchange = true
// If value changed to a poor choice for a statement boundary, move the boundary
if v.Pos.IsStmt() == src.PosIsStmt {
if k := nextGoodStatementIndex(v, j, b); k != j {
v.Pos = v.Pos.WithNotStmt()
b.Values[k].Pos = b.Values[k].Pos.WithIsStmt()
}
}
}
change = change || vchange
if vchange && debug > 1 {
fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
}
}
}
if !change {
break
}
}
// remove clobbered values
for _, b := range f.Blocks {
j := 0
for i, v := range b.Values {
vl := v.Pos
if v.Op == OpInvalid {
if v.Pos.IsStmt() == src.PosIsStmt {
pendingLines.set(vl, int32(b.ID))
}
f.freeValue(v)
continue
}
if v.Pos.IsStmt() != src.PosNotStmt && pendingLines.get(vl) == int32(b.ID) {
pendingLines.remove(vl)
v.Pos = v.Pos.WithIsStmt()
}
if i != j {
b.Values[j] = v
}
j++
}
if pendingLines.get(b.Pos) == int32(b.ID) {
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos)
}
b.truncateValues(j)
}
}
// Common functions called from rewriting rules
func is64BitFloat(t *types.Type) bool {
return t.Size() == 8 && t.IsFloat()
}
func is32BitFloat(t *types.Type) bool {
return t.Size() == 4 && t.IsFloat()
}
func is64BitInt(t *types.Type) bool {
return t.Size() == 8 && t.IsInteger()
}
func is32BitInt(t *types.Type) bool {
return t.Size() == 4 && t.IsInteger()
}
func is16BitInt(t *types.Type) bool {
return t.Size() == 2 && t.IsInteger()
}
func is8BitInt(t *types.Type) bool {
return t.Size() == 1 && t.IsInteger()
}
func isPtr(t *types.Type) bool {
return t.IsPtrShaped()
}
func isSigned(t *types.Type) bool {
return t.IsSigned()
}
// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} {
if x == nil {
return y
}
if y == nil {
return x
}
panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
}
func canMergeSym(x, y interface{}) bool {
return x == nil || y == nil
}
func mergeSymTyped(x, y Sym) Sym {
if x == nil {
return y
}
if y == nil {
return x
}
panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
}
// canMergeLoadClobber reports whether the load can be merged into target without
// invalidating the schedule.
// It also checks that the other non-load argument x is something we
// are ok with clobbering.
func canMergeLoadClobber(target, load, x *Value) bool {
// The register containing x is going to get clobbered.
// Don't merge if we still need the value of x.
// We don't have liveness information here, but we can
// approximate x dying with:
// 1) target is x's only use.
// 2) target is not in a deeper loop than x.
if x.Uses != 1 {
return false
}
loopnest := x.Block.Func.loopnest()
loopnest.calculateDepths()
if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
return false
}
return canMergeLoad(target, load)
}
// canMergeLoad reports whether the load can be merged into target without
// invalidating the schedule.
func canMergeLoad(target, load *Value) bool {
if target.Block.ID != load.Block.ID {
// If the load is in a different block do not merge it.
return false
}
// We can't merge the load into the target if the load
// has more than one use.
if load.Uses != 1 {
return false
}
mem := load.MemoryArg()
// We need the load's memory arg to still be alive at target. That
// can't be the case if one of target's args depends on a memory
// state that is a successor of load's memory arg.
//
// For example, it would be invalid to merge load into target in
// the following situation because newmem has killed oldmem
// before target is reached:
// load = read ... oldmem
// newmem = write ... oldmem
// arg0 = read ... newmem
// target = add arg0 load
//
// If the argument comes from a different block then we can exclude
// it immediately because it must dominate load (which is in the
// same block as target).
var args []*Value
for _, a := range target.Args {
if a != load && a.Block.ID == target.Block.ID {
args = append(args, a)
}
}
// memPreds contains memory states known to be predecessors of load's
// memory state. It is lazily initialized.
var memPreds map[*Value]bool
for i := 0; len(args) > 0; i++ {
const limit = 100
if i >= limit {
// Give up if we have done a lot of iterations.
return false
}
v := args[len(args)-1]
args = args[:len(args)-1]
if target.Block.ID != v.Block.ID {
// Since target and load are in the same block
// we can stop searching when we leave the block.
continue
}
if v.Op == OpPhi {
// A Phi implies we have reached the top of the block.
// The memory phi, if it exists, is always
// the first logical store in the block.
continue
}
if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
// We could handle this situation however it is likely
// to be very rare.
return false
}
if v.Op.SymEffect()&SymAddr != 0 {
// This case prevents an operation that calculates the
// address of a local variable from being forced to schedule
// before its corresponding VarDef.
// See issue 28445.
// v1 = LOAD ...
// v2 = VARDEF
// v3 = LEAQ
// v4 = CMPQ v1 v3
// We don't want to combine the CMPQ with the load, because
// that would force the CMPQ to schedule before the VARDEF, which
// in turn requires the LEAQ to schedule before the VARDEF.
return false
}
if v.Type.IsMemory() {
if memPreds == nil {
// Initialise a map containing memory states
// known to be predecessors of load's memory
// state.
memPreds = make(map[*Value]bool)
m := mem
const limit = 50
for i := 0; i < limit; i++ {
if m.Op == OpPhi {
// The memory phi, if it exists, is always
// the first logical store in the block.
break
}
if m.Block.ID != target.Block.ID {
break
}
if !m.Type.IsMemory() {
break
}
memPreds[m] = true
if len(m.Args) == 0 {
break
}
m = m.MemoryArg()
}
}
// We can merge if v is a predecessor of mem.
//
// For example, we can merge load into target in the
// following scenario:
// x = read ... v
// mem = write ... v
// load = read ... mem
// target = add x load
if memPreds[v] {
continue
}
return false
}
if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
// If v takes mem as an input then we know mem
// is valid at this point.
continue
}
for _, a := range v.Args {
if target.Block.ID == a.Block.ID {
args = append(args, a)
}
}
}
return true
}
// symNamed reports whether sym's name is name.
func symNamed(sym Sym, name string) bool {
return sym.String() == name
}
// isSameSym reports whether sym is the same as the given named symbol
func isSameSym(sym interface{}, name string) bool {
s, ok := sym.(fmt.Stringer)
return ok && s.String() == name
}
// nlzX returns the number of leading zeros.
func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
func nlz32(x int32) int { return bits.LeadingZeros32(uint32(x)) }
func nlz16(x int16) int { return bits.LeadingZeros16(uint16(x)) }
func nlz8(x int8) int { return bits.LeadingZeros8(uint8(x)) }
// ntzX returns the number of trailing zeros.
func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) }
func ntz32(x int32) int { return bits.TrailingZeros32(uint32(x)) }
func ntz16(x int16) int { return bits.TrailingZeros16(uint16(x)) }
func ntz8(x int8) int { return bits.TrailingZeros8(uint8(x)) }
func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 }
func oneBit8(x int8) bool { return x&(x-1) == 0 && x != 0 }
func oneBit16(x int16) bool { return x&(x-1) == 0 && x != 0 }
func oneBit32(x int32) bool { return x&(x-1) == 0 && x != 0 }
func oneBit64(x int64) bool { return x&(x-1) == 0 && x != 0 }
// nto returns the number of trailing ones.
func nto(x int64) int64 {
return int64(ntz64(^x))
}
// log2 returns logarithm in base 2 of uint64(n), with log2(0) = -1.
// Rounds down.
func log2(n int64) int64 {
return int64(bits.Len64(uint64(n))) - 1
}
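// For illustration, since bits.Len64 returns the number of bits needed to
// represent its argument: log2(1) == 0, log2(7) == 2, log2(8) == 3, and
// log2(0) == -1.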
// logX returns logarithm of n base 2.
// n must be a positive power of 2 (isPowerOfTwoX returns true).
func log8(n int8) int64 {
return int64(bits.Len8(uint8(n))) - 1
}
func log16(n int16) int64 {
return int64(bits.Len16(uint16(n))) - 1
}
func log32(n int32) int64 {
return int64(bits.Len32(uint32(n))) - 1
}
func log64(n int64) int64 {
return int64(bits.Len64(uint64(n))) - 1
}
// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
// Rounds down.
func log2uint32(n int64) int64 {
return int64(bits.Len32(uint32(n))) - 1
}
// isPowerOfTwo reports whether n is a power of 2.
func isPowerOfTwo(n int64) bool {
return n > 0 && n&(n-1) == 0
}
func isPowerOfTwo8(n int8) bool {
return n > 0 && n&(n-1) == 0
}
func isPowerOfTwo16(n int16) bool {
return n > 0 && n&(n-1) == 0
}
func isPowerOfTwo32(n int32) bool {
return n > 0 && n&(n-1) == 0
}
func isPowerOfTwo64(n int64) bool {
return n > 0 && n&(n-1) == 0
}
// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
func isUint64PowerOfTwo(in int64) bool {
n := uint64(in)
return n > 0 && n&(n-1) == 0
}
// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
func isUint32PowerOfTwo(in int64) bool {
n := uint64(uint32(in))
return n > 0 && n&(n-1) == 0
}
// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
return n == int64(int32(n))
}
// is16Bit reports whether n can be represented as a signed 16 bit integer.
func is16Bit(n int64) bool {
return n == int64(int16(n))
}
// is8Bit reports whether n can be represented as a signed 8 bit integer.
func is8Bit(n int64) bool {
return n == int64(int8(n))
}
// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
func isU8Bit(n int64) bool {
return n == int64(uint8(n))
}
// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
func isU12Bit(n int64) bool {
return 0 <= n && n < (1<<12)
}
// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
func isU16Bit(n int64) bool {
return n == int64(uint16(n))
}
// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
func isU32Bit(n int64) bool {
return n == int64(uint32(n))
}
// is20Bit reports whether n can be represented as a signed 20 bit integer.
func is20Bit(n int64) bool {
return -(1<<19) <= n && n < (1<<19)
}
// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
func b2i(b bool) int64 {
if b {
return 1
}
return 0
}
// b2i32 translates a boolean value to 0 or 1.
func b2i32(b bool) int32 {
if b {
return 1
}
return 0
}
// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
// A shift is bounded if it is shifting by less than the width of the shifted value.
func shiftIsBounded(v *Value) bool {
return v.AuxInt != 0
}
// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
// of the mantissa. It will panic if the truncation results in lost information.
func truncate64Fto32F(f float64) float32 {
if !isExactFloat32(f) {
panic("truncate64Fto32F: truncation is not exact")
}
if !math.IsNaN(f) {
return float32(f)
}
// NaN bit patterns aren't necessarily preserved across conversion
// instructions so we need to do the conversion manually.
b := math.Float64bits(f)
m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand)
// | sign | exponent | mantissa |
r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23)))
return math.Float32frombits(r)
}
// extend32Fto64F converts a float32 value to a float64 value preserving the bit
// pattern of the mantissa.
func extend32Fto64F(f float32) float64 {
if !math.IsNaN(float64(f)) {
return float64(f)
}
// NaN bit patterns aren't necessarily preserved across conversion
// instructions so we need to do the conversion manually.
b := uint64(math.Float32bits(f))
// | sign | exponent | mantissa |
r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
return math.Float64frombits(r)
}
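// As an illustration of the NaN branch above: a float32 quiet NaN with bit
// pattern 0x7fc00001 maps to the float64 bit pattern 0x7ff8000020000000
// (sign bit kept, exponent forced to all ones, and the 23-bit payload shifted
// up into the top of the 52-bit mantissa). The NaN branch of truncate64Fto32F
// performs the inverse mapping and recovers 0x7fc00001.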
// DivisionNeedsFixUp reports whether the division needs fix-up code.
func DivisionNeedsFixUp(v *Value) bool {
return v.AuxInt == 0
}
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {
if f != f {
panic("can't encode a NaN in AuxInt field")
}
return int64(math.Float64bits(f))
}
// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
func auxFrom32F(f float32) int64 {
if f != f {
panic("can't encode a NaN in AuxInt field")
}
return int64(math.Float64bits(extend32Fto64F(f)))
}
// auxTo32F decodes a float32 from the AuxInt value provided.
func auxTo32F(i int64) float32 {
return truncate64Fto32F(math.Float64frombits(uint64(i)))
}
// auxTo64F decodes a float64 from the AuxInt value provided.
func auxTo64F(i int64) float64 {
return math.Float64frombits(uint64(i))
}
func auxIntToBool(i int64) bool {
if i == 0 {
return false
}
return true
}
func auxIntToInt8(i int64) int8 {
return int8(i)
}
func auxIntToInt16(i int64) int16 {
return int16(i)
}
func auxIntToInt32(i int64) int32 {
return int32(i)
}
func auxIntToInt64(i int64) int64 {
return i
}
func auxIntToUint8(i int64) uint8 {
return uint8(i)
}
func auxIntToFloat32(i int64) float32 {
return float32(math.Float64frombits(uint64(i)))
}
func auxIntToFloat64(i int64) float64 {
return math.Float64frombits(uint64(i))
}
func auxIntToValAndOff(i int64) ValAndOff {
return ValAndOff(i)
}
func auxIntToInt128(x int64) int128 {
if x != 0 {
panic("nonzero int128 not allowed")
}
return 0
}
func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x)
}
func boolToAuxInt(b bool) int64 {
if b {
return 1
}
return 0
}
func int8ToAuxInt(i int8) int64 {
return int64(i)
}
func int16ToAuxInt(i int16) int64 {
return int64(i)
}
func int32ToAuxInt(i int32) int64 {
return int64(i)
}
func int64ToAuxInt(i int64) int64 {
return int64(i)
}
func uint8ToAuxInt(i uint8) int64 {
return int64(int8(i))
}
func float32ToAuxInt(f float32) int64 {
return int64(math.Float64bits(float64(f)))
}
func float64ToAuxInt(f float64) int64 {
return int64(math.Float64bits(f))
}
func valAndOffToAuxInt(v ValAndOff) int64 {
return int64(v)
}
func int128ToAuxInt(x int128) int64 {
if x != 0 {
panic("nonzero int128 not allowed")
}
return 0
}
func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x)
}
func auxToString(i interface{}) string {
return i.(string)
}
func auxToSym(i interface{}) Sym {
// TODO: kind of a hack - allows nil interface through
s, _ := i.(Sym)
return s
}
func auxToType(i interface{}) *types.Type {
return i.(*types.Type)
}
func auxToS390xCCMask(i interface{}) s390x.CCMask {
return i.(s390x.CCMask)
}
func auxToS390xRotateParams(i interface{}) s390x.RotateParams {
return i.(s390x.RotateParams)
}
func stringToAux(s string) interface{} {
return s
}
func symToAux(s Sym) interface{} {
return s
}
func typeToAux(t *types.Type) interface{} {
return t
}
func s390xCCMaskToAux(c s390x.CCMask) interface{} {
return c
}
func s390xRotateParamsToAux(r s390x.RotateParams) interface{} {
return r
}
func cCopToAux(o Op) interface{} {
return o
}
// uaddOvf reports whether unsigned a+b would overflow.
func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
// de-virtualize an InterCall
// 'sym' is the symbol for the itab
func devirt(v *Value, sym Sym, offset int64) *obj.LSym {
f := v.Block.Func
n, ok := sym.(*obj.LSym)
if !ok {
return nil
}
lsym := f.fe.DerefItab(n, offset)
if f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
}
}
return lsym
}
// isSamePtr reports whether p1 and p2 point to the same address.
func isSamePtr(p1, p2 *Value) bool {
if p1 == p2 {
return true
}
if p1.Op != p2.Op {
return false
}
switch p1.Op {
case OpOffPtr:
return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
case OpAddr, OpLocalAddr:
// OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
// Checking for value equality only works after [z]cse has run.
return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
case OpAddPtr:
return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
}
return false
}
func isStackPtr(v *Value) bool {
for v.Op == OpOffPtr || v.Op == OpAddPtr {
v = v.Args[0]
}
return v.Op == OpSP || v.Op == OpLocalAddr
}
// disjoint reports whether the memory region specified by [p1:p1+n1)
// does not overlap with [p2:p2+n2).
// A return value of false does not imply the regions overlap.
func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
if n1 == 0 || n2 == 0 {
return true
}
if p1 == p2 {
return false
}
baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
base, offset = ptr, 0
for base.Op == OpOffPtr {
offset += base.AuxInt
base = base.Args[0]
}
return base, offset
}
p1, off1 := baseAndOffset(p1)
p2, off2 := baseAndOffset(p2)
if isSamePtr(p1, p2) {
return !overlap(off1, n1, off2, n2)
}
// p1 and p2 are not the same, so if they are both OpAddrs then
// they point to different variables.
// If one pointer is on the stack and the other is an argument
// then they can't overlap.
switch p1.Op {
case OpAddr, OpLocalAddr:
if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP {
return true
}
return p2.Op == OpArg && p1.Args[0].Op == OpSP
case OpArg:
if p2.Op == OpSP || p2.Op == OpLocalAddr {
return true
}
case OpSP:
return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpSP
}
return false
}
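// For example, two OffPtr values with the same base covering [base+0, base+8)
// and [base+8, base+16) are reported as disjoint, while [base+0, base+8) and
// [base+4, base+12) are not. Pointers with unrelated bases conservatively
// report false unless one of the OpAddr/OpLocalAddr/OpArg/OpSP cases applies.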
// moveSize returns the number of bytes an aligned MOV instruction moves
func moveSize(align int64, c *Config) int64 {
switch {
case align%8 == 0 && c.PtrSize == 8:
return 8
case align%4 == 0:
return 4
case align%2 == 0:
return 2
}
return 1
}
// mergePoint finds a block among a's blocks which dominates b and is itself
// dominated by all of a's blocks. Returns nil if it can't find one.
// Might return nil even if one does exist.
func mergePoint(b *Block, a ...*Value) *Block {
// Walk backward from b looking for one of the a's blocks.
// Max distance
d := 100
for d > 0 {
for _, x := range a {
if b == x.Block {
goto found
}
}
if len(b.Preds) > 1 {
// Don't know which way to go back. Abort.
return nil
}
b = b.Preds[0].b
d--
}
return nil // too far away
found:
// At this point, r is the first value in a that we find by walking backwards.
// if we return anything, r will be it.
r := b
// Keep going, counting the other a's that we find. They must all dominate r.
na := 0
for d > 0 {
for _, x := range a {
if b == x.Block {
na++
}
}
if na == len(a) {
// Found all of a in a backwards walk. We can return r.
return r
}
if len(b.Preds) > 1 {
return nil
}
b = b.Preds[0].b
d--
}
return nil // too far away
}
// clobber invalidates values. Returns true.
// clobber is used by rewrite rules to:
// A) make sure the values are really dead and never used again.
// B) decrement use counts of the values' args.
func clobber(vv ...*Value) bool {
for _, v := range vv {
v.reset(OpInvalid)
// Note: leave v.Block intact. The Block field is used after clobber.
}
return true
}
// clobberIfDead resets v when use count is 1. Returns true.
// clobberIfDead is used by rewrite rules to decrement
// use counts of v's args when v is dead and never used.
func clobberIfDead(v *Value) bool {
if v.Uses == 1 {
v.reset(OpInvalid)
}
// Note: leave v.Block intact. The Block field is used after clobberIfDead.
return true
}
// noteRule is an easy way to track if a rule is matched when writing
// new ones. Make the rule of interest also conditional on
// noteRule("note to self: rule of interest matched")
// and that message will print when the rule matches.
func noteRule(s string) bool {
fmt.Println(s)
return true
}
// countRule increments Func.ruleMatches[key].
// If Func.ruleMatches is non-nil at the end
// of compilation, it will be printed to stdout.
// This is intended to make it easier to find which functions
// contain lots of rule matches when developing new rules.
func countRule(v *Value, key string) bool {
f := v.Block.Func
if f.ruleMatches == nil {
f.ruleMatches = make(map[string]int)
}
f.ruleMatches[key]++
return true
}
// warnRule generates compiler debug output with string s when
// v is not in autogenerated code, cond is true and the rule has fired.
func warnRule(cond bool, v *Value, s string) bool {
if pos := v.Pos; pos.Line() > 1 && cond {
v.Block.Func.Warnl(pos, s)
}
return true
}
// for a pseudo-op like (LessThan x), extract x
func flagArg(v *Value) *Value {
if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
return nil
}
return v.Args[0]
}
// arm64Negate finds the complement to an ARM64 condition code,
// for example Equal -> NotEqual or LessThan -> GreaterEqual
//
// TODO: add floating-point conditions
func arm64Negate(op Op) Op {
switch op {
case OpARM64LessThan:
return OpARM64GreaterEqual
case OpARM64LessThanU:
return OpARM64GreaterEqualU
case OpARM64GreaterThan:
return OpARM64LessEqual
case OpARM64GreaterThanU:
return OpARM64LessEqualU
case OpARM64LessEqual:
return OpARM64GreaterThan
case OpARM64LessEqualU:
return OpARM64GreaterThanU
case OpARM64GreaterEqual:
return OpARM64LessThan
case OpARM64GreaterEqualU:
return OpARM64LessThanU
case OpARM64Equal:
return OpARM64NotEqual
case OpARM64NotEqual:
return OpARM64Equal
case OpARM64LessThanF:
return OpARM64GreaterEqualF
case OpARM64GreaterThanF:
return OpARM64LessEqualF
case OpARM64LessEqualF:
return OpARM64GreaterThanF
case OpARM64GreaterEqualF:
return OpARM64LessThanF
default:
panic("unreachable")
}
}
// arm64Invert evaluates (InvertFlags op), which
// is the same as altering the condition codes such
// that the same result would be produced if the arguments
// to the flag-generating instruction were reversed, e.g.
// (InvertFlags (CMP x y)) -> (CMP y x)
//
// TODO: add floating-point conditions
func arm64Invert(op Op) Op {
switch op {
case OpARM64LessThan:
return OpARM64GreaterThan
case OpARM64LessThanU:
return OpARM64GreaterThanU
case OpARM64GreaterThan:
return OpARM64LessThan
case OpARM64GreaterThanU:
return OpARM64LessThanU
case OpARM64LessEqual:
return OpARM64GreaterEqual
case OpARM64LessEqualU:
return OpARM64GreaterEqualU
case OpARM64GreaterEqual:
return OpARM64LessEqual
case OpARM64GreaterEqualU:
return OpARM64LessEqualU
case OpARM64Equal, OpARM64NotEqual:
return op
case OpARM64LessThanF:
return OpARM64GreaterThanF
case OpARM64GreaterThanF:
return OpARM64LessThanF
case OpARM64LessEqualF:
return OpARM64GreaterEqualF
case OpARM64GreaterEqualF:
return OpARM64LessEqualF
default:
panic("unreachable")
}
}
// evaluate an ARM64 op against a flags value
// that is potentially constant; return 1 for true,
// -1 for false, and 0 for not constant.
func ccARM64Eval(cc interface{}, flags *Value) int {
op := cc.(Op)
fop := flags.Op
if fop == OpARM64InvertFlags {
return -ccARM64Eval(op, flags.Args[0])
}
if fop != OpARM64FlagConstant {
return 0
}
fc := flagConstant(flags.AuxInt)
b2i := func(b bool) int {
if b {
return 1
}
return -1
}
switch op {
case OpARM64Equal:
return b2i(fc.eq())
case OpARM64NotEqual:
return b2i(fc.ne())
case OpARM64LessThan:
return b2i(fc.lt())
case OpARM64LessThanU:
return b2i(fc.ult())
case OpARM64GreaterThan:
return b2i(fc.gt())
case OpARM64GreaterThanU:
return b2i(fc.ugt())
case OpARM64LessEqual:
return b2i(fc.le())
case OpARM64LessEqualU:
return b2i(fc.ule())
case OpARM64GreaterEqual:
return b2i(fc.ge())
case OpARM64GreaterEqualU:
return b2i(fc.uge())
}
return 0
}
// logRule logs the use of the rule s. This will only be enabled if
// rewrite rules were generated with the -log option, see gen/rulegen.go.
func logRule(s string) {
if ruleFile == nil {
// Open a log file to write log to. We open in append
// mode because all.bash runs the compiler lots of times,
// and we want the concatenation of all of those logs.
// This means, of course, that users need to rm the old log
// to get fresh data.
// TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
panic(err)
}
ruleFile = w
}
_, err := fmt.Fprintln(ruleFile, s)
if err != nil {
panic(err)
}
}
var ruleFile io.Writer
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
func isConstZero(v *Value) bool {
switch v.Op {
case OpConstNil:
return true
case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
return v.AuxInt == 0
}
return false
}
// reciprocalExact64 reports whether 1/c is exactly representable.
func reciprocalExact64(c float64) bool {
b := math.Float64bits(c)
man := b & (1<<52 - 1)
if man != 0 {
return false // not a power of 2, denormal, or NaN
}
exp := b >> 52 & (1<<11 - 1)
// exponent bias is 0x3ff. So taking the reciprocal of a number
// changes the exponent to 0x7fe-exp.
switch exp {
case 0:
return false // ±0
case 0x7ff:
return false // ±inf
case 0x7fe:
return false // exponent is not representable
default:
return true
}
}
// reciprocalExact32 reports whether 1/c is exactly representable.
func reciprocalExact32(c float32) bool {
b := math.Float32bits(c)
man := b & (1<<23 - 1)
if man != 0 {
return false // not a power of 2, denormal, or NaN
}
exp := b >> 23 & (1<<8 - 1)
// exponent bias is 0x7f. So taking the reciprocal of a number
// changes the exponent to 0xfe-exp.
switch exp {
case 0:
return false // ±0
case 0xff:
return false // ±inf
case 0xfe:
return false // exponent is not representable
default:
return true
}
}
// check whether an immediate can be directly encoded into an ARM instruction
func isARMImmRot(v uint32) bool {
for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return true
}
v = v<<2 | v>>30
}
return false
}
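// For example, isARMImmRot(0xff) and isARMImmRot(0xff000000) both report true
// (the latter is 0xff rotated), while isARMImmRot(0x101) reports false because
// bits 8 positions apart can never fit into a single byte window.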
// overlap reports whether the ranges given by the given offset and
// size pairs overlap.
func overlap(offset1, size1, offset2, size2 int64) bool {
if offset1 >= offset2 && offset2+size2 > offset1 {
return true
}
if offset2 >= offset1 && offset1+size1 > offset2 {
return true
}
return false
}
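// For example, overlap(0, 8, 4, 4) reports true (the second range lies inside
// the first), while overlap(0, 4, 4, 4) reports false (the ranges only touch).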
func areAdjacentOffsets(off1, off2, size int64) bool {
return off1+size == off2 || off1 == off2+size
}
// zeroUpper32Bits checks whether a value zeroes out the upper 32 bits of a 64-bit register.
// depth limits the recursion depth. AMD64.rules uses 3 as the limit
// because it catches the same number of cases as 4.
func zeroUpper32Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1,
OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1,
OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload,
OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL,
OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst,
OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst,
OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL,
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
case OpArg:
return x.Type.Width == 4
case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking visited values,
		// just limit the recursion depth.
if depth <= 0 {
return false
}
for i := range x.Args {
if !zeroUpper32Bits(x.Args[i], depth-1) {
return false
}
}
return true
}
return false
}
// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits
func zeroUpper48Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
return true
case OpArg:
return x.Type.Width == 2
case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking visited values,
		// just limit the recursion depth.
if depth <= 0 {
return false
}
for i := range x.Args {
if !zeroUpper48Bits(x.Args[i], depth-1) {
return false
}
}
return true
}
return false
}
// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits
func zeroUpper56Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
return true
case OpArg:
return x.Type.Width == 1
case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking visited values,
		// just limit the recursion depth.
if depth <= 0 {
return false
}
for i := range x.Args {
if !zeroUpper56Bits(x.Args[i], depth-1) {
return false
}
}
return true
}
return false
}
// isInlinableMemmove reports whether the given arch performs a Move of the given size
// faster than memmove. It will only return true if replacing the memmove with a Move is
// safe, either because Move is small or because the arguments are disjoint.
// This is used as a check for replacing memmove with Move ops.
func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
// It is always safe to convert memmove into Move when its arguments are disjoint.
// Move ops may or may not be faster for large sizes depending on how the platform
// lowers them, so we only perform this optimization on platforms that we know to
// have fast Move ops.
switch c.arch {
case "amd64":
return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
case "386", "arm64":
return sz <= 8
case "s390x", "ppc64", "ppc64le":
return sz <= 8 || disjoint(dst, sz, src, sz)
case "arm", "mips", "mips64", "mipsle", "mips64le":
return sz <= 4
}
return false
}
// logLargeCopy logs the occurrence of a large copy.
// The best place to do this is in the rewrite rules where the size of the move is easy to find.
// "Large" is arbitrarily chosen to be 128 bytes; this may change.
func logLargeCopy(v *Value, s int64) bool {
if s < 128 {
return true
}
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "copy", "lower", v.Block.Func.Name, fmt.Sprintf("%d bytes", s))
}
return true
}
// hasSmallRotate reports whether the architecture has rotate instructions
// for sizes < 32-bit. This is used to decide whether to promote some rotations.
func hasSmallRotate(c *Config) bool {
switch c.arch {
case "amd64", "386":
return true
default:
return false
}
}
// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
func armBFAuxInt(lsb, width int64) int64 {
if lsb < 0 || lsb > 63 {
panic("ARM(64) bit field lsb constant out of range")
}
if width < 1 || width > 64 {
panic("ARM(64) bit field width constant out of range")
}
return width | lsb<<8
}
// returns the lsb part of the auxInt field of arm64 bitfield ops.
func getARM64BFlsb(bfc int64) int64 {
return int64(uint64(bfc) >> 8)
}
// returns the width part of the auxInt field of arm64 bitfield ops.
func getARM64BFwidth(bfc int64) int64 {
return bfc & 0xff
}
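// A quick sanity check of the encoding: armBFAuxInt(8, 16) yields 0x810, from
// which getARM64BFlsb recovers 8 and getARM64BFwidth recovers 16.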
// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
func isARM64BFMask(lsb, mask, rshift int64) bool {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
return shiftedMask != 0 && isPowerOfTwo(shiftedMask+1) && nto(shiftedMask)+lsb < 64
}
// returns the bitfield width of mask >> rshift for arm64 bitfield ops
func arm64BFWidth(mask, rshift int64) int64 {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
if shiftedMask == 0 {
panic("ARM64 BF mask is zero")
}
return nto(shiftedMask)
}
// sizeof returns the size of t in bytes.
// It will panic if t is not a *types.Type.
func sizeof(t interface{}) int64 {
return t.(*types.Type).Size()
}
// registerizable reports whether t is a primitive type that fits in
// a register. It assumes float64 values will always fit into registers
// even if that isn't strictly true.
func registerizable(b *Block, typ *types.Type) bool {
if typ.IsPtrShaped() || typ.IsFloat() {
return true
}
if typ.IsInteger() {
return typ.Size() <= b.Func.Config.RegSize
}
return false
}
// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed.
func needRaceCleanup(sym Sym, v *Value) bool {
f := v.Block.Func
if !f.Config.Race {
return false
}
if !symNamed(sym, "runtime.racefuncenter") && !symNamed(sym, "runtime.racefuncexit") {
return false
}
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpStaticCall:
// Check for racefuncenter will encounter racefuncexit and vice versa.
// Allow calls to panic*
s := v.Aux.(fmt.Stringer).String()
switch s {
case "runtime.racefuncenter", "runtime.racefuncexit",
"runtime.panicdivide", "runtime.panicwrap",
"runtime.panicshift":
continue
}
// If we encountered any call, we need to keep racefunc*,
// for accurate stacktraces.
return false
case OpPanicBounds, OpPanicExtend:
// Note: these are panic generators that are ok (like the static calls above).
case OpClosureCall, OpInterCall:
// We must keep the race functions if there are any other call types.
return false
}
}
}
return true
}
// symIsRO reports whether sym is a read-only global.
func symIsRO(sym interface{}) bool {
lsym := sym.(*obj.LSym)
return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
}
// symIsROZero reports whether sym is a read-only global whose data contains all zeros.
func symIsROZero(sym Sym) bool {
lsym := sym.(*obj.LSym)
if lsym.Type != objabi.SRODATA || len(lsym.R) != 0 {
return false
}
for _, b := range lsym.P {
if b != 0 {
return false
}
}
return true
}
// read8 reads one byte from the read-only global sym at offset off.
func read8(sym interface{}, off int64) uint8 {
lsym := sym.(*obj.LSym)
if off >= int64(len(lsym.P)) || off < 0 {
// Invalid index into the global sym.
// This can happen in dead code, so we don't want to panic.
// Just return any value, it will eventually get ignored.
// See issue 29215.
return 0
}
return lsym.P[off]
}
// read16 reads two bytes from the read-only global sym at offset off.
func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 {
lsym := sym.(*obj.LSym)
// lsym.P is written lazily.
// Bytes requested after the end of lsym.P are 0.
var src []byte
if 0 <= off && off < int64(len(lsym.P)) {
src = lsym.P[off:]
}
buf := make([]byte, 2)
copy(buf, src)
return byteorder.Uint16(buf)
}
// read32 reads four bytes from the read-only global sym at offset off.
func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 {
lsym := sym.(*obj.LSym)
var src []byte
if 0 <= off && off < int64(len(lsym.P)) {
src = lsym.P[off:]
}
buf := make([]byte, 4)
copy(buf, src)
return byteorder.Uint32(buf)
}
// read64 reads eight bytes from the read-only global sym at offset off.
func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {
lsym := sym.(*obj.LSym)
var src []byte
if 0 <= off && off < int64(len(lsym.P)) {
src = lsym.P[off:]
}
buf := make([]byte, 8)
copy(buf, src)
return byteorder.Uint64(buf)
}
// sequentialAddresses reports true if it can prove that x + n == y
func sequentialAddresses(x, y *Value, n int64) bool {
if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil &&
(x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
return true
}
if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
(x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
return true
}
if x.Op == OpAMD64ADDQ && y.Op == OpAMD64LEAQ1 && y.AuxInt == n && y.Aux == nil &&
(x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
return true
}
if x.Op == OpAMD64LEAQ1 && y.Op == OpAMD64LEAQ1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
(x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
return true
}
return false
}
// flagConstant represents the result of a compile-time comparison.
// The sense of these flags does not necessarily represent the hardware's notion
// of a flags register - these are just a compile-time construct.
// We happen to match the semantics to those of arm/arm64.
// Note that these semantics differ from x86: the carry flag has the opposite
// sense on a subtraction!
// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
// (because it does x + ^y + C).
// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
type flagConstant uint8
// N reports whether the result of an operation is negative (high bit set).
func (fc flagConstant) N() bool {
return fc&1 != 0
}
// Z reports whether the result of an operation is 0.
func (fc flagConstant) Z() bool {
return fc&2 != 0
}
// C reports whether an unsigned add overflowed (carry), or an
// unsigned subtract did not underflow (borrow).
func (fc flagConstant) C() bool {
return fc&4 != 0
}
// V reports whether a signed operation overflowed or underflowed.
func (fc flagConstant) V() bool {
return fc&8 != 0
}
func (fc flagConstant) eq() bool {
return fc.Z()
}
func (fc flagConstant) ne() bool {
return !fc.Z()
}
func (fc flagConstant) lt() bool {
return fc.N() != fc.V()
}
func (fc flagConstant) le() bool {
return fc.Z() || fc.lt()
}
func (fc flagConstant) gt() bool {
return !fc.Z() && fc.ge()
}
func (fc flagConstant) ge() bool {
return fc.N() == fc.V()
}
func (fc flagConstant) ult() bool {
return !fc.C()
}
func (fc flagConstant) ule() bool {
return fc.Z() || fc.ult()
}
func (fc flagConstant) ugt() bool {
return !fc.Z() && fc.uge()
}
func (fc flagConstant) uge() bool {
return fc.C()
}
func (fc flagConstant) ltNoov() bool {
return fc.lt() && !fc.V()
}
func (fc flagConstant) leNoov() bool {
return fc.le() && !fc.V()
}
func (fc flagConstant) gtNoov() bool {
return fc.gt() && !fc.V()
}
func (fc flagConstant) geNoov() bool {
return fc.ge() && !fc.V()
}
func (fc flagConstant) String() string {
return fmt.Sprintf("N=%v,Z=%v,C=%v,V=%v", fc.N(), fc.Z(), fc.C(), fc.V())
}
type flagConstantBuilder struct {
N bool
Z bool
C bool
V bool
}
func (fcs flagConstantBuilder) encode() flagConstant {
var fc flagConstant
if fcs.N {
fc |= 1
}
if fcs.Z {
fc |= 2
}
if fcs.C {
fc |= 4
}
if fcs.V {
fc |= 8
}
return fc
}
// Note: addFlags(x,y) != subFlags(x,-y) in some situations:
// - the results of the C flag are different
// - the results of the V flag when y==minint are different
// addFlags64 returns the flags that would be set from computing x+y.
func addFlags64(x, y int64) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x+y == 0
fcb.N = x+y < 0
fcb.C = uint64(x+y) < uint64(x)
fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
return fcb.encode()
}
// subFlags64 returns the flags that would be set from computing x-y.
func subFlags64(x, y int64) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x-y == 0
fcb.N = x-y < 0
fcb.C = uint64(y) <= uint64(x) // This code follows the arm carry flag model.
fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
return fcb.encode()
}
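// Following the arm carry model described above: subFlags64(1, 1) sets Z and C
// (no borrow), so eq() and uge() hold; subFlags64(0, 1) clears C (a borrow
// occurred) and sets N, so ult() holds.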
// addFlags32 returns the flags that would be set from computing x+y.
func addFlags32(x, y int32) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x+y == 0
fcb.N = x+y < 0
fcb.C = uint32(x+y) < uint32(x)
fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
return fcb.encode()
}
// subFlags32 returns the flags that would be set from computing x-y.
func subFlags32(x, y int32) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x-y == 0
fcb.N = x-y < 0
fcb.C = uint32(y) <= uint32(x) // This code follows the arm carry flag model.
fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
return fcb.encode()
}
// logicFlags64 returns flags set to the sign/zeroness of x.
// C and V are set to false.
func logicFlags64(x int64) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x == 0
fcb.N = x < 0
return fcb.encode()
}
// logicFlags32 returns flags set to the sign/zeroness of x.
// C and V are set to false.
func logicFlags32(x int32) flagConstant {
var fcb flagConstantBuilder
fcb.Z = x == 0
fcb.N = x < 0
return fcb.encode()
}
|
[
"\"GOROOT\""
] |
[] |
[
"GOROOT"
] |
[]
|
["GOROOT"]
|
go
| 1 | 0 | |
testdata/common/cgroups.py
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Utility code for creating cgroups for the Impala development environment.
# May be used as a library or as a command-line utility for manual testing.
import os
import sys
import errno
from optparse import OptionParser
# Options
parser = OptionParser()
parser.add_option("-s", "--cluster_size", type="int", dest="cluster_size", default=3,
help="Size of the cluster (number of impalad instances to start).")
def get_cpu_controller_root():
"""Returns the filesystem path of the CPU cgroup controller.
Currently assumes the CPU controller is mounted in the standard location.
TODO: Read /etc/mounts to find where cpu controller is mounted.
"""
CGROUP_CPU_ROOT = "/sys/fs/cgroup/cpu"
if not os.path.isdir(CGROUP_CPU_ROOT):
raise Exception("Cgroup CPU controller is not mounted at %s" % (CGROUP_CPU_ROOT))
return CGROUP_CPU_ROOT
def get_session_cpu_path():
"""Returns the path of the CPU cgroup hierarchy for this session, which is writable
by the impalad processes. The cgroup hierarchy is specified as an absolute path
under the CPU controller root.
"""
PROC_SELF_CGROUP = '/proc/self/cgroup'
cgroup_paths = open(PROC_SELF_CGROUP)
try:
for line in cgroup_paths:
parts = line.strip().split(':')
if len(parts) == 3 and parts[1] == 'cpu':
return parts[2]
finally:
cgroup_paths.close()
raise Exception("Process cgroup CPU hierarchy not found in %s" % (PROC_SELF_CGROUP))
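# For illustration, a /proc/self/cgroup line of the form "3:cpu:/user.slice"
# splits into ["3", "cpu", "/user.slice"], so get_session_cpu_path() returns
# "/user.slice". Systems that mount the controller as "cpu,cpuacct" would not
# match this check.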
def create_impala_cgroup_path(instance_num):
"""Returns the full filesystem path of a CPU controller cgroup hierarchy which is
writeable by an impalad. The base cgroup path is read from the environment variable
IMPALA_CGROUP_BASE_PATH if it is set, otherwise it is set to a child of the path of
the cgroup for this process.
  instance_num is used to provide different (sibling) cgroups for each impalad
instance. The returned cgroup is created if necessary.
"""
parent_cgroup = os.getenv('IMPALA_CGROUP_BASE_PATH')
if parent_cgroup is None:
    # Join root path with the cpu hierarchy path by concatenating the strings. Can't use
# path.join() because the session cpu hierarchy path looks like an absolute FS path.
parent_cgroup = "%s%s" % (get_cpu_controller_root(), get_session_cpu_path())
cgroup_path = os.path.join(parent_cgroup, ("impala-%s" % instance_num))
try:
os.makedirs(cgroup_path)
except OSError, ex:
if ex.errno == errno.EEXIST and os.path.isdir(cgroup_path):
pass
else: raise
return cgroup_path
if __name__ == "__main__":
  options, args = parser.parse_args()
  if options.cluster_size < 0:
    print 'Please specify a cluster size >= 0'
    sys.exit(1)
  for i in range(options.cluster_size):
    create_impala_cgroup_path(i)
|
[] |
[] |
[
"IMPALA_CGROUP_BASE_PATH"
] |
[]
|
["IMPALA_CGROUP_BASE_PATH"]
|
python
| 1 | 0 | |
pkg/cmd/factory/default.go
|
package factory
import (
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/cli/cli/v2/api"
"github.com/cli/cli/v2/context"
"github.com/cli/cli/v2/git"
"github.com/cli/cli/v2/internal/config"
"github.com/cli/cli/v2/internal/ghrepo"
"github.com/cli/cli/v2/pkg/cmd/extension"
"github.com/cli/cli/v2/pkg/cmdutil"
"github.com/cli/cli/v2/pkg/iostreams"
)
func New(appVersion string) *cmdutil.Factory {
var exe string
f := &cmdutil.Factory{
Config: configFunc(), // No factory dependencies
Branch: branchFunc(), // No factory dependencies
Executable: func() string {
if exe != "" {
return exe
}
exe = executable("gh")
return exe
},
}
f.IOStreams = ioStreams(f) // Depends on Config
f.HttpClient = httpClientFunc(f, appVersion) // Depends on Config, IOStreams, and appVersion
f.Remotes = remotesFunc(f) // Depends on Config
f.BaseRepo = BaseRepoFunc(f) // Depends on Remotes
f.Browser = browser(f) // Depends on Config, and IOStreams
f.ExtensionManager = extensionManager(f) // Depends on Config, HttpClient, and IOStreams
return f
}
func BaseRepoFunc(f *cmdutil.Factory) func() (ghrepo.Interface, error) {
return func() (ghrepo.Interface, error) {
remotes, err := f.Remotes()
if err != nil {
return nil, err
}
return remotes[0], nil
}
}
func SmartBaseRepoFunc(f *cmdutil.Factory) func() (ghrepo.Interface, error) {
return func() (ghrepo.Interface, error) {
httpClient, err := f.HttpClient()
if err != nil {
return nil, err
}
apiClient := api.NewClientFromHTTP(httpClient)
remotes, err := f.Remotes()
if err != nil {
return nil, err
}
repoContext, err := context.ResolveRemotesToRepos(remotes, apiClient, "")
if err != nil {
return nil, err
}
baseRepo, err := repoContext.BaseRepo(f.IOStreams)
if err != nil {
return nil, err
}
return baseRepo, nil
}
}
func remotesFunc(f *cmdutil.Factory) func() (context.Remotes, error) {
rr := &remoteResolver{
readRemotes: git.Remotes,
getConfig: f.Config,
}
return rr.Resolver()
}
func httpClientFunc(f *cmdutil.Factory, appVersion string) func() (*http.Client, error) {
return func() (*http.Client, error) {
io := f.IOStreams
cfg, err := f.Config()
if err != nil {
return nil, err
}
return NewHTTPClient(io, cfg, appVersion, true)
}
}
func browser(f *cmdutil.Factory) cmdutil.Browser {
io := f.IOStreams
return cmdutil.NewBrowser(browserLauncher(f), io.Out, io.ErrOut)
}
// Browser precedence
// 1. GH_BROWSER
// 2. browser from config
// 3. BROWSER
func browserLauncher(f *cmdutil.Factory) string {
if ghBrowser := os.Getenv("GH_BROWSER"); ghBrowser != "" {
return ghBrowser
}
cfg, err := f.Config()
if err == nil {
if cfgBrowser, _ := cfg.Get("", "browser"); cfgBrowser != "" {
return cfgBrowser
}
}
return os.Getenv("BROWSER")
}
// Finds the location of the executable for the current process as it's found in PATH, respecting symlinks.
// If the process couldn't determine its location, return fallbackName. If the executable wasn't found in
// PATH, return the absolute location to the program.
//
// The idea is that the result of this function is callable in the future and refers to the same
// installation of gh, even across upgrades. This is needed primarily for Homebrew, which installs software
// under a location such as `/usr/local/Cellar/gh/1.13.1/bin/gh` and symlinks it from `/usr/local/bin/gh`.
// When the version is upgraded, Homebrew will often delete older versions, but keep the symlink. Because of
// this, we want to refer to the `gh` binary as `/usr/local/bin/gh` and not as its internal Homebrew
// location.
//
// None of this would be needed if we could just refer to GitHub CLI as `gh`, i.e. without using an absolute
// path. However, for some reason Homebrew does not include `/usr/local/bin` in PATH when it invokes git
// commands to update its taps. If `gh` (no path) is being used as git credential helper, as set up by `gh
// auth login`, running `brew update` will print out authentication errors as git is unable to locate
// Homebrew-installed `gh`.
func executable(fallbackName string) string {
exe, err := os.Executable()
if err != nil {
return fallbackName
}
base := filepath.Base(exe)
path := os.Getenv("PATH")
for _, dir := range filepath.SplitList(path) {
p, err := filepath.Abs(filepath.Join(dir, base))
if err != nil {
continue
}
f, err := os.Stat(p)
if err != nil {
continue
}
if p == exe {
return p
} else if f.Mode()&os.ModeSymlink != 0 {
if t, err := os.Readlink(p); err == nil && t == exe {
return p
}
}
}
return exe
}
func configFunc() func() (config.Config, error) {
var cachedConfig config.Config
var configError error
return func() (config.Config, error) {
if cachedConfig != nil || configError != nil {
return cachedConfig, configError
}
cachedConfig, configError = config.ParseDefaultConfig()
if errors.Is(configError, os.ErrNotExist) {
cachedConfig = config.NewBlankConfig()
configError = nil
}
cachedConfig = config.InheritEnv(cachedConfig)
return cachedConfig, configError
}
}
func branchFunc() func() (string, error) {
return func() (string, error) {
currentBranch, err := git.CurrentBranch()
if err != nil {
return "", fmt.Errorf("could not determine current branch: %w", err)
}
return currentBranch, nil
}
}
func extensionManager(f *cmdutil.Factory) *extension.Manager {
em := extension.NewManager(f.IOStreams)
cfg, err := f.Config()
if err != nil {
return em
}
em.SetConfig(cfg)
client, err := f.HttpClient()
if err != nil {
return em
}
em.SetClient(api.NewCachedClient(client, time.Second*30))
return em
}
func ioStreams(f *cmdutil.Factory) *iostreams.IOStreams {
io := iostreams.System()
cfg, err := f.Config()
if err != nil {
return io
}
if prompt, _ := cfg.Get("", "prompt"); prompt == "disabled" {
io.SetNeverPrompt(true)
}
// Pager precedence
// 1. GH_PAGER
// 2. pager from config
// 3. PAGER
if ghPager, ghPagerExists := os.LookupEnv("GH_PAGER"); ghPagerExists {
io.SetPager(ghPager)
} else if pager, _ := cfg.Get("", "pager"); pager != "" {
io.SetPager(pager)
}
return io
}
|
[
"\"GH_BROWSER\"",
"\"BROWSER\"",
"\"PATH\""
] |
[] |
[
"GH_BROWSER",
"PATH",
"BROWSER"
] |
[]
|
["GH_BROWSER", "PATH", "BROWSER"]
|
go
| 3 | 0 | |
kvsp/main.go
|
package main
import (
"debug/elf"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/BurntSushi/toml"
)
var flagVerbose bool
const defaultCAHPProc = "ruby"
const defaultROMSize = 512
const defaultRAMSize = 512
// Flag for a list of values
// Thanks to: https://stackoverflow.com/a/28323276
type arrayFlags []string
func (i *arrayFlags) String() string {
return "my string representation"
}
func (i *arrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
func write16le(out []byte, val int) {
out[0] = byte(val & 0xff)
out[1] = byte((val >> 8) & 0xff)
}
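// For example, write16le(out, 0x1234) stores out[0] = 0x34 and out[1] = 0x12,
// i.e. the value is laid out in little-endian byte order.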
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
func getExecDir() (string, error) {
execPath, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(execPath), nil
}
func prefixExecDir(path string) (string, error) {
execPath, err := getExecDir()
if err != nil {
return "", err
}
return filepath.Join(execPath, path), nil
}
func getPathOf(name string) (string, error) {
path := ""
relative := true
	// Check if the environment variable KVSP_XXX_PATH is set.
if path = os.Getenv("KVSP_" + strings.Replace(name, "-", "_", -1) + "_PATH"); path != "" {
relative = false
} else {
/*
Do heuristic approach, which assumes binaries are in the current
(this executable's) directory, and others are in ../share/kvsp.
*/
switch name {
case "CAHP_RT":
path = "../share/kvsp/cahp-rt"
case "CAHP_SIM":
path = "cahp-sim"
case "CLANG":
path = "clang"
case "IYOKAN":
path = "iyokan"
case "IYOKAN-BLUEPRINT-RUBY":
path = "../share/kvsp/cahp-ruby.toml"
case "IYOKAN-BLUEPRINT-PEARL":
path = "../share/kvsp/cahp-pearl.toml"
case "IYOKAN-PACKET":
path = "iyokan-packet"
default:
return "", errors.New("Invalid name")
}
}
if relative {
newPath, err := prefixExecDir(path)
if err != nil {
return "", err
}
path = newPath
}
if !fileExists(path) {
return "", fmt.Errorf("%s not found at %s", name, path)
}
return path, nil
}
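// For example, getPathOf("IYOKAN-PACKET") first consults the environment
// variable KVSP_IYOKAN_PACKET_PATH (dashes replaced by underscores) and only
// then falls back to the heuristic paths listed in the switch above.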
// Parse the input as ELF and get ROM and RAM images.
func parseELF(fileName string, romSize, ramSize uint64) ([]byte, []byte, error) {
input, err := elf.Open(fileName)
if err != nil {
return nil, nil, err
}
rom := make([]byte, romSize)
ram := make([]byte, ramSize)
for _, prog := range input.Progs {
addr := prog.ProgHeader.Vaddr
size := prog.ProgHeader.Filesz
if size == 0 {
continue
}
var mem []byte
if addr < 0x10000 { // ROM
if addr+size >= romSize {
return nil, nil, errors.New("Invalid ROM size: too small")
}
mem = rom[addr : addr+size]
} else { // RAM
if addr-0x10000+size >= ramSize {
return nil, nil, errors.New("Invalid RAM size: too small")
}
mem = ram[addr-0x10000 : addr-0x10000+size]
}
reader := prog.Open()
_, err := reader.Read(mem)
if err != nil {
return nil, nil, err
}
}
return rom, ram, nil
}
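// For example, a program header with Vaddr 0x100 is copied into rom[0x100:],
// while one with Vaddr 0x10000 is copied into ram[0:]; everything below
// address 0x10000 is treated as ROM and everything at or above it as RAM.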
func attachCommandLineOptions(ram []byte, cmdOptsSrc []string) error {
// N1548 5.1.2.2.1 2
// the string pointed to by argv[0]
// represents the program name; argv[0][0] shall be the null character if the
// program name is not available from the host environment.
cmdOpts := []string{""}
cmdOpts = append(cmdOpts, cmdOptsSrc...)
argc := len(cmdOpts)
// Slice for *argv.
sargv := []int{
// N1548 5.1.2.2.1 2
// argv[argc] shall be a null pointer.
0,
}
ramSize := len(ram)
index := ramSize - 2
// Set **argv to RAM
for i := len(cmdOpts) - 1; i >= 0; i-- {
opt := append([]byte(cmdOpts[i]), 0)
for j := len(opt) - 1; j >= 0; j-- {
index--
ram[index] = opt[j]
}
sargv = append(sargv, index)
}
// Align index
if index%2 == 1 {
index--
}
// Set *argv to RAM
for _, val := range sargv {
index -= 2
write16le(ram[index:index+2], val)
}
// Save argc in RAM
index -= 2
write16le(ram[index:index+2], argc)
// Save initial stack pointer in RAM
initSP := index
write16le(ram[ramSize-2:ramSize], initSP)
return nil
}
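// Roughly, the resulting layout at the top of RAM (from high to low addresses)
// is: the initial stack pointer value in the last two bytes, then the
// NUL-terminated argument strings, an optional alignment byte, the argv[]
// pointer array (with its terminating null pointer in the highest slot), and
// finally argc, which is where the initial stack pointer points.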
func execCmdImpl(name string, args []string) *exec.Cmd {
if flagVerbose {
fmtArgs := make([]string, len(args))
for i, arg := range args {
fmtArgs[i] = fmt.Sprintf("'%s'", arg)
}
fmt.Fprintf(os.Stderr, "exec: '%s' %s\n", name, strings.Join(fmtArgs, " "))
}
cmd := exec.Command(name, args...)
cmd.Stderr = os.Stderr
return cmd
}
func execCmd(name string, args []string) error {
cmd := execCmdImpl(name, args)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
return cmd.Run()
}
func outCmd(name string, args []string) (string, error) {
out, err := execCmdImpl(name, args).Output()
return string(out), err
}
func runIyokanPacket(args ...string) (string, error) {
// Get the path of iyokan-packet
path, err := getPathOf("IYOKAN-PACKET")
if err != nil {
return "", err
}
// Run
return outCmd(path, args)
}
func runIyokan(args0 []string, args1 []string) error {
iyokanPath, err := getPathOf("IYOKAN")
if err != nil {
return err
}
// Run iyokan
args := append(args0, args1...)
return execCmd(iyokanPath, args)
}
func packELF(
inputFileName, outputFileName string,
cmdOpts []string,
romSize, ramSize uint64,
) error {
if !fileExists(inputFileName) {
return errors.New("File not found")
}
rom, ram, err := parseELF(inputFileName, romSize, ramSize)
if err != nil {
return err
}
if err = attachCommandLineOptions(ram, cmdOpts); err != nil {
return err
}
args := []string{
"pack",
"--out", outputFileName,
}
// Write ROM data
romTmpFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(romTmpFile.Name())
if _, err = romTmpFile.Write(rom); err != nil {
return err
}
args = append(args, "--rom", "rom:"+romTmpFile.Name())
// Write RAM data
ramTmpFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(ramTmpFile.Name())
if _, err = ramTmpFile.Write(ram); err != nil {
return err
}
args = append(args, "--ram", "ram:"+ramTmpFile.Name())
// Pack
if _, err = runIyokanPacket(args...); err != nil {
return err
}
return nil
}
type plainPacketTOML struct {
NumCycles int `toml:"cycles"`
Ram []plainPacketEntryTOML `toml:"ram"`
Bits []plainPacketEntryTOML `toml:"bits"`
}
type plainPacketEntryTOML struct {
Name string `toml:"name"`
Size int `toml:"size"`
Bytes []int `toml:"bytes"`
}
type plainPacket struct {
NumCycles int
Flags map[string]bool
Regs map[string]int
Ram []int
}
func (pkt *plainPacket) loadTOML(src string) error {
var pktTOML plainPacketTOML
if _, err := toml.Decode(src, &pktTOML); err != nil {
return err
}
pkt.NumCycles = pktTOML.NumCycles
// Load flags and registers
pkt.Flags = make(map[string]bool)
pkt.Regs = make(map[string]int)
for _, entry := range pktTOML.Bits {
if entry.Size == 1 { // flag
if entry.Bytes[0] != 0 {
pkt.Flags[entry.Name] = true
} else {
pkt.Flags[entry.Name] = false
}
} else if entry.Size == 16 { // register
pkt.Regs[entry.Name] = entry.Bytes[0] | (entry.Bytes[1] << 8)
} else {
return errors.New("Invalid TOML for result packet")
}
}
// Load ram
mapRam := make(map[string]plainPacketEntryTOML)
for _, entry := range pktTOML.Ram {
if _, exists := mapRam[entry.Name]; exists {
return errors.New("Invalid TOML data: same entry name in ram")
}
mapRam[entry.Name] = entry
}
pkt.Ram = nil
if entry, ok := mapRam["ram"]; ok {
if entry.Size%8 != 0 {
return errors.New("Invalid RAM data: size is not multiple of 8")
}
pkt.Ram = make([]int, entry.Size/8)
for addr := range entry.Bytes {
pkt.Ram[addr] = entry.Bytes[addr]
}
} else {
return errors.New("Invalid TOML for result packet")
}
// Check if the packet is correct
if _, ok := pkt.Flags["finflag"]; !ok {
return errors.New("Invalid TOML for result packet: 'finflag' not found")
}
for i := 0; i < 16; i++ {
name := fmt.Sprintf("reg_x%d", i)
if _, ok := pkt.Regs[name]; !ok {
return errors.New("Invalid TOML for result packet: '" + name + "' not found")
}
}
return nil
}
func (pkt *plainPacket) print(w io.Writer) error {
fmt.Fprintf(w, "#cycle\t%d\n", pkt.NumCycles)
fmt.Fprintf(w, "\n")
fmt.Fprintf(w, "f0\t%t\n", pkt.Flags["finflag"])
fmt.Fprintf(w, "\n")
for i := 0; i < 16; i++ {
name := fmt.Sprintf("reg_x%d", i)
fmt.Fprintf(w, "x%d\t%d\n", i, pkt.Regs[name])
}
fmt.Fprintf(w, "\n")
fmt.Fprintf(w, " \t 0 1 2 3 4 5 6 7 8 9 a b c d e f")
for addr := 0; addr < len(pkt.Ram); addr++ {
if addr%16 == 0 {
fmt.Fprintf(w, "\n%06x\t", addr)
}
fmt.Fprintf(w, "%02x ", pkt.Ram[addr])
}
fmt.Fprintf(w, "\n")
return nil
}
func doCC() error {
// Get the path of clang
path, err := getPathOf("CLANG")
if err != nil {
return err
}
// Get the path of cahp-rt
cahpRtPath, err := getPathOf("CAHP_RT")
if err != nil {
return err
}
// Run
args := []string{"-target", "cahp", "-mcpu=generic", "-Oz", "--sysroot", cahpRtPath}
args = append(args, os.Args[2:]...)
return execCmd(path, args)
}
func doDebug() error {
// Get the path of cahp-sim
path, err := getPathOf("CAHP_SIM")
if err != nil {
return err
}
// Run
return execCmd(path, os.Args[2:])
}
func doEmu() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("emu", flag.ExitOnError)
var (
whichCAHPCPU = fs.String("cahp-cpu", defaultCAHPProc, "Which CAHP CPU you use, ruby or pearl")
iyokanArgs arrayFlags
)
fs.Var(&iyokanArgs, "iyokan-args", "Raw arguments for Iyokan")
	err := fs.Parse(os.Args[2:])
	if err != nil {
		return err
	}
// Create tmp file for packing
packedFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(packedFile.Name())
// Pack
err = packELF(fs.Args()[0], packedFile.Name(), fs.Args()[1:], defaultROMSize, defaultRAMSize)
if err != nil {
return err
}
// Create tmp file for the result
resTmpFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(resTmpFile.Name())
// Run Iyokan in plain mode
blueprint, err := getPathOf(fmt.Sprintf("IYOKAN-BLUEPRINT-%s", strings.ToUpper(*whichCAHPCPU)))
if err != nil {
return err
}
err = runIyokan([]string{"plain", "-i", packedFile.Name(), "-o", resTmpFile.Name(), "--blueprint", blueprint}, iyokanArgs)
if err != nil {
return err
}
// Unpack the result
result, err := runIyokanPacket("packet2toml", "--in", resTmpFile.Name())
if err != nil {
return err
}
// Parse and print the result
var pkt plainPacket
if err := pkt.loadTOML(result); err != nil {
return err
}
pkt.print(os.Stdout)
return nil
}
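// doDec decrypts an encrypted result packet with the secret key and prints its contents.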
func doDec() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("dec", flag.ExitOnError)
var (
keyFileName = fs.String("k", "", "Key file name")
inputFileName = fs.String("i", "", "Input file name (encrypted)")
)
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *keyFileName == "" || *inputFileName == "" {
return errors.New("Specify -k and -i options properly")
}
// Create tmp file for decryption
packedFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(packedFile.Name())
// Decrypt
_, err = runIyokanPacket("dec",
"--key", *keyFileName,
"--in", *inputFileName,
"--out", packedFile.Name())
if err != nil {
return err
}
// Unpack
result, err := runIyokanPacket("packet2toml", "--in", packedFile.Name())
if err != nil {
return err
}
// Parse and print the result
var pkt plainPacket
if err := pkt.loadTOML(result); err != nil {
return err
}
pkt.print(os.Stdout)
return nil
}
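// doEnc packs the given ELF and its arguments into a packet and encrypts it with the secret key.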
func doEnc() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("enc", flag.ExitOnError)
var (
keyFileName = fs.String("k", "", "Secret key file name")
inputFileName = fs.String("i", "", "Input file name (plain)")
outputFileName = fs.String("o", "", "Output file name (encrypted)")
)
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *keyFileName == "" || *inputFileName == "" || *outputFileName == "" {
return errors.New("Specify -k, -i, and -o options properly")
}
// Create tmp file for packing
packedFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer os.Remove(packedFile.Name())
// Pack
err = packELF(*inputFileName, packedFile.Name(), fs.Args(), defaultROMSize, defaultRAMSize)
if err != nil {
return err
}
// Encrypt
_, err = runIyokanPacket("enc",
"--key", *keyFileName,
"--in", packedFile.Name(),
"--out", *outputFileName)
return err
}
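// doGenkey generates a TFHEpp secret key via iyokan-packet.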
func doGenkey() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("genkey", flag.ExitOnError)
var (
outputFileName = fs.String("o", "", "Output file name")
)
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *outputFileName == "" {
return errors.New("Specify the -o option properly")
}
_, err = runIyokanPacket("genkey",
"--type", "tfhepp",
"--out", *outputFileName)
return err
}
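// doGenbkey derives a bootstrapping key from the given secret key.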
func doGenbkey() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("genbkey", flag.ExitOnError)
var (
inputFileName = fs.String("i", "", "Input file name (secret key)")
outputFileName = fs.String("o", "", "Output file name (bootstrapping key)")
)
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *inputFileName == "" || *outputFileName == "" {
return errors.New("Specify -i and -o options properly")
}
_, err = runIyokanPacket("genbkey",
"--in", *inputFileName,
"--out", *outputFileName)
return err
}
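// doPlainpacket packs the given ELF and its arguments into a plaintext packet without encryption.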
func doPlainpacket() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("plainpacket", flag.ExitOnError)
var (
inputFileName = fs.String("i", "", "Input file name (plain)")
outputFileName = fs.String("o", "", "Output file name (encrypted)")
)
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *inputFileName == "" || *outputFileName == "" {
return errors.New("Specify -i and -o options properly")
}
return packELF(*inputFileName, *outputFileName, fs.Args(), defaultROMSize, defaultRAMSize)
}
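// doRun executes an encrypted packet on Iyokan in TFHE mode for the requested number of clocks.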
func doRun() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("run", flag.ExitOnError)
var (
nClocks = fs.Uint("c", 0, "Number of clocks to run")
bkeyFileName = fs.String("bkey", "", "Bootstrapping key file name")
inputFileName = fs.String("i", "", "Input file name (encrypted)")
outputFileName = fs.String("o", "", "Output file name (encrypted)")
numGPU = fs.Uint("g", 0, "Number of GPUs (Unspecify or set 0 for CPU mode)")
whichCAHPCPU = fs.String("cahp-cpu", defaultCAHPProc, "Which CAHP CPU you use, ruby or pearl")
snapshotFileName = fs.String("snapshot", "", "Snapshot file name to write in")
quiet = fs.Bool("quiet", false, "Be quiet")
iyokanArgs arrayFlags
)
fs.Var(&iyokanArgs, "iyokan-args", "Raw arguments for Iyokan")
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *nClocks == 0 || *bkeyFileName == "" || *inputFileName == "" || *outputFileName == "" {
return errors.New("Specify -c, -bkey, -i, and -o options properly")
}
blueprint, err := getPathOf(fmt.Sprintf("IYOKAN-BLUEPRINT-%s", strings.ToUpper(*whichCAHPCPU)))
if err != nil {
return err
}
args := []string{
"-i", *inputFileName,
"--blueprint", blueprint,
}
if *numGPU > 0 {
args = append(args, "--enable-gpu", "--gpu_num", fmt.Sprint(*numGPU))
}
return runIyokanTFHE(*nClocks, *bkeyFileName, *outputFileName, *snapshotFileName, *quiet, args, iyokanArgs)
}
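// doResume continues a previous TFHE run from a snapshot file.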
func doResume() error {
// Parse command-line arguments.
fs := flag.NewFlagSet("resume", flag.ExitOnError)
var (
nClocks = fs.Uint("c", 0, "Number of clocks to run")
bkeyFileName = fs.String("bkey", "", "Bootstrapping key file name")
inputFileName = fs.String("i", "", "Snapshot file to resume from")
outputFileName = fs.String("o", "", "Output file name (encrypted)")
snapshotFileName = fs.String("snapshot", "", "Snapshot file name to write in")
quiet = fs.Bool("quiet", false, "Be quiet")
iyokanArgs arrayFlags
)
fs.Var(&iyokanArgs, "iyokan-args", "Raw arguments for Iyokan")
err := fs.Parse(os.Args[2:])
if err != nil {
return err
}
if *nClocks == 0 || *bkeyFileName == "" || *inputFileName == "" || *outputFileName == "" {
return errors.New("Specify -c, -bkey, -i, and -o options properly")
}
args := []string{
"--resume", *inputFileName,
}
return runIyokanTFHE(*nClocks, *bkeyFileName, *outputFileName, *snapshotFileName, *quiet, args, iyokanArgs)
}
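// runIyokanTFHE assembles the common arguments for Iyokan's "tfhe" mode, runs it, and reports where the snapshot was written.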
func runIyokanTFHE(nClocks uint, bkeyFileName string, outputFileName string, snapshotFileName string, quiet bool, otherArgs0 []string, otherArgs1 []string) error {
var err error
if snapshotFileName == "" {
snapshotFileName = fmt.Sprintf(
"kvsp_%s.snapshot", time.Now().Format("20060102150405"))
}
args := []string{
"tfhe",
"--bkey", bkeyFileName,
"-o", outputFileName,
"-c", fmt.Sprint(nClocks),
"--snapshot", snapshotFileName,
}
if quiet {
args = append(args, "--quiet")
}
args = append(args, otherArgs0...)
args = append(args, otherArgs1...)
if err = runIyokan(args, []string{}); err != nil {
return err
}
if !quiet {
fmt.Printf("\n")
fmt.Printf("Snapshot was taken as file '%s'. You can resume the process like:\n", snapshotFileName)
fmt.Printf("\t$ %s resume -c %d -i %s -o %s -bkey %s\n",
os.Args[0], nClocks, snapshotFileName, outputFileName, bkeyFileName)
}
return nil
}
var kvspVersion = "unk"
var kvspRevision = "unk"
var iyokanRevision = "unk"
var iyokanL1Revision = "unk"
var cahpRubyRevision = "unk"
var cahpPearlRevision = "unk"
var cahpRtRevision = "unk"
var cahpSimRevision = "unk"
var llvmCahpRevision = "unk"
var yosysRevision = "unk"
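// doVersion prints the version and revision of KVSP and its bundled components.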
func doVersion() error {
fmt.Printf("KVSP %s\t(rev %s)\n", kvspVersion, kvspRevision)
fmt.Printf("- Iyokan\t(rev %s)\n", iyokanRevision)
fmt.Printf("- Iyokan-L1\t(rev %s)\n", iyokanL1Revision)
fmt.Printf("- cahp-ruby\t(rev %s)\n", cahpRubyRevision)
fmt.Printf("- cahp-pearl\t(rev %s)\n", cahpPearlRevision)
fmt.Printf("- cahp-rt\t(rev %s)\n", cahpRtRevision)
fmt.Printf("- cahp-sim\t(rev %s)\n", cahpSimRevision)
fmt.Printf("- llvm-cahp\t(rev %s)\n", llvmCahpRevision)
fmt.Printf("- Yosys\t(rev %s)\n", yosysRevision)
return nil
}
func main() {
if envvarVerbose := os.Getenv("KVSP_VERBOSE"); envvarVerbose == "1" {
flagVerbose = true
}
flag.Usage = func() {
fmt.Fprintf(os.Stderr, `Usage: %s COMMAND [OPTIONS]... ARGS...
KVSP is the first virtual secure platform in the world, which makes your life better.
Commands:
cc
debug
dec
emu
enc
genkey
genbkey
plainpacket
resume
run
version
`, os.Args[0])
flag.PrintDefaults()
}
if len(os.Args) <= 1 {
flag.Usage()
os.Exit(1)
}
var err error
switch os.Args[1] {
case "cc":
err = doCC()
case "debug":
err = doDebug()
case "dec":
err = doDec()
case "emu":
err = doEmu()
case "enc":
err = doEnc()
case "genkey":
err = doGenkey()
case "genbkey":
err = doGenbkey()
case "plainpacket":
err = doPlainpacket()
case "resume":
err = doResume()
case "run":
err = doRun()
case "version":
err = doVersion()
default:
flag.Usage()
os.Exit(1)
}
if err != nil {
log.Fatal(err) // log.Fatal prints the error and exits with a non-zero status
}
}
|
[
"\"KVSP_\" + strings.Replace(name, \"-\", \"_\", -1",
"\"KVSP_VERBOSE\""
] |
[] |
[
"KVSP_\" + strings.Replace(name, \"-\", \"_\", -",
"KVSP_VERBOSE"
] |
[]
|
["KVSP_\" + strings.Replace(name, \"-\", \"_\", -", "KVSP_VERBOSE"]
|
go
| 2 | 0 | |
legacy/clouditor-engine-azure/src/main/java/io/clouditor/credentials/AzureAccount.java
|
/*
* Copyright 2016-2019 Fraunhofer AISEC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* $$\ $$\ $$\ $$\
* $$ | $$ |\__| $$ |
* $$$$$$$\ $$ | $$$$$$\ $$\ $$\ $$$$$$$ |$$\ $$$$$$\ $$$$$$\ $$$$$$\
* $$ _____|$$ |$$ __$$\ $$ | $$ |$$ __$$ |$$ |\_$$ _| $$ __$$\ $$ __$$\
* $$ / $$ |$$ / $$ |$$ | $$ |$$ / $$ |$$ | $$ | $$ / $$ |$$ | \__|
* $$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ |$$\ $$ | $$ |$$ |
* \$$$$$$\ $$ |\$$$$$ |\$$$$$ |\$$$$$$ |$$ | \$$$ |\$$$$$ |$$ |
* \_______|\__| \______/ \______/ \_______|\__| \____/ \______/ \__|
*
* This file is part of Clouditor Community Edition.
*/
package io.clouditor.credentials;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.microsoft.azure.AzureEnvironment;
import com.microsoft.azure.credentials.ApplicationTokenCredentials;
import com.microsoft.azure.credentials.AzureCliCredentials;
import com.microsoft.azure.credentials.AzureTokenCredentials;
import com.microsoft.azure.management.Azure;
import java.io.File;
import java.io.IOException;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Table;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
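/**
 * Cloud account backed by Azure service principal credentials (client id, client secret, tenant and
 * domain). Credentials can also be auto-discovered from the default auth file or the Azure CLI.
 */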
@Table(name = "azure_account")
@Entity(name = "azure_account")
@JsonTypeName("Azure")
public class AzureAccount extends CloudAccount<AzureTokenCredentials> {
private static final long serialVersionUID = 1737969287469590217L;
@Column(name = "client_id", nullable = false)
@JsonProperty
private String clientId;
@Column(name = "tenant_id")
@JsonProperty
private String tenantId;
@Column(name = "domain")
@JsonProperty
private String domain;
// TODO: might be needed again if an account has multiple subscriptions to find the correct one
// @JsonProperty private String subscriptionId;
@Column(name = "client_secret")
@JsonProperty
private String clientSecret;
public static AzureAccount discover() throws IOException {
var account = new AzureAccount();
// fetch credentials from default credential chain
var credentials = defaultCredentialProviderChain();
var azure = Azure.authenticate(credentials).withDefaultSubscription();
account.setAccountId(azure.getCurrentSubscription().displayName());
account.setAutoDiscovered(true);
account.setDomain(credentials.domain());
return account;
}
private static AzureTokenCredentials defaultCredentialProviderChain() throws IOException {
// check if the default credentials-file exists
var credentialsFile = new File(defaultAuthFile());
if (credentialsFile.exists()) {
LOGGER.info("Using default credentials file {}", credentialsFile);
return ApplicationTokenCredentials.fromFile(credentialsFile);
} else {
// otherwise, use default locations
LOGGER.info("Did not find default credentials. Trying to use AzureCLI credentials instead.");
return AzureCliCredentials.create();
}
}
@Override
public void validate() throws IOException {
var credentials = this.resolveCredentials();
try {
var azure = Azure.authenticate(credentials).withDefaultSubscription();
this.setAccountId(azure.getCurrentSubscription().displayName());
} catch (RuntimeException ex) {
throw new IOException(ex.getCause());
}
}
@Override
public AzureTokenCredentials resolveCredentials() throws IOException {
if (this.isAutoDiscovered()) {
return AzureAccount.defaultCredentialProviderChain();
} else {
return new ApplicationTokenCredentials(
clientId, tenantId, clientSecret, AzureEnvironment.AZURE);
}
}
private static String defaultAuthFile() {
return System.getenv()
.getOrDefault(
"AZURE_AUTH_LOCATION", System.getProperty("user.home") + "/.azure/clouditor.azureauth");
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
public void setTenantId(String tenantId) {
this.tenantId = tenantId;
}
public void setClientSecret(String clientSecret) {
this.clientSecret = clientSecret;
}
@JsonProperty
public String getAuthFile() {
return defaultAuthFile();
}
public void setDomain(String domain) {
this.domain = domain;
}
@Override
public String toString() {
return new ToStringBuilder(this)
.append("clientId", clientId)
.append("tenantId", tenantId)
.append("domain", domain)
.append("clientSecret", clientSecret)
.append("accountId", accountId)
.append("user", user)
.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AzureAccount that = (AzureAccount) o;
return new EqualsBuilder()
.append(clientId, that.clientId)
.append(tenantId, that.tenantId)
.append(domain, that.domain)
.append(clientSecret, that.clientSecret)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(clientId)
.append(tenantId)
.append(domain)
.append(clientSecret)
.toHashCode();
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
solon/src/main/java/org/noear/solon/SolonProps.java
|
package org.noear.solon;
import org.noear.solon.core.JarClassLoader;
import org.noear.solon.core.NvMap;
import org.noear.solon.core.PluginEntity;
import org.noear.solon.core.Props;
import org.noear.solon.core.util.ResourceScaner;
import java.net.URL;
import java.util.*;
import java.util.function.BiConsumer;
/**
 * Unified configuration loader
 *
 * <pre><code>
 * //
 * // Fetch configuration manually (container auto mode also available: @Inject("${water.logger}"))
* //
* Solon.cfg()
* Solon.cfg().isDebugMode()
* Solon.cfg().isDriftMode()
* Solon.cfg().get("water.logger")
* Solon.cfg().getProp("db1")
* </code></pre>
*
* @author noear
* @since 1.0
* */
public final class SolonProps extends Props {
private static final String FILE_ENCODING = "file.encoding";
private static final String FILE_ENCODING_DEF = "utf-8";
private NvMap args;
private Class<?> source;
private URL sourceLocation;
private final List<PluginEntity> plugs = new ArrayList<>();
private boolean isDebugMode;
private boolean isDriftMode;
private boolean isFilesMode;
private boolean isWhiteMode;
private boolean isSetupMode;
private String extend;
private String extendFilter;
private String appName;
private String appGroup;
private String appTitle;
public SolonProps() {
super(System.getProperties());
}
/**
* Load configuration (used for the first load)
*
* @param args startup arguments
*/
public SolonProps load(Class<?> source, NvMap args) {
//1. Receive the startup arguments
this.args = args;
//1.1. Application source
this.source = source;
//1.2. Location of the application source
this.sourceLocation = source.getProtectionDomain().getCodeSource().getLocation();
//2. Load file-based configuration
//@Deprecated
loadInit(Utils.getResource("application.properties"));
//@Deprecated
loadInit(Utils.getResource("application.yml"));
loadInit(Utils.getResource("app.properties"));
loadInit(Utils.getResource("app.yml"));
//2.1. Load environment variables (supports variables set by elastic containers)
loadEnv("solon.");
//2.2. Load the environment profile (e.g. env=pro or env=debug)
String env = getArg("env");
if (Utils.isEmpty(env)) {
//@Deprecated
env = getArg("profiles.active");
}
if (Utils.isNotEmpty(env)) {
//@Deprecated
loadInit(Utils.getResource("application-" + env + ".properties"));
//@Deprecated
loadInit(Utils.getResource("application-" + env + ".yml"));
loadInit(Utils.getResource("app-" + env + ".properties"));
loadInit(Utils.getResource("app-" + env + ".yml"));
}
//3. Sync the startup arguments
this.args.forEach((k, v) -> {
if (k.contains(".")) {
this.setProperty(k, v);
System.setProperty(k, v);
}
});
//4. Initialize mode flags
//whether in files mode
isFilesMode = (sourceLocation.getPath().endsWith(".jar") == false
&& sourceLocation.getPath().contains(".jar!/") == false
&& sourceLocation.getPath().endsWith(".zip") == false
&& sourceLocation.getPath().contains(".zip!/") == false);
//whether in debug mode
isDebugMode = "1".equals(getArg("debug")); //debug mode
//whether in setup mode
isSetupMode = "1".equals(getArg("setup")); //setup mode
//whether in white-list mode
isWhiteMode = "1".equals(getArg("white", "1")); //safe mode (i.e. white-list mode)
//whether in drift mode
isDriftMode = "1".equals(getArg("drift")); //drift mode (i.e. the IP may change, e.g. pod deployment)
//mark debug mode
if (isDebugMode()) {
System.setProperty("debug", "1");
}
//5. Determine the extension folder
extend = getArg("extend");
extendFilter = getArg("extend.filter");//5.1. extension folder filter
//6. Basic application info
appName = getArg("app.name"); //6.1. application name
appGroup = getArg("app.group"); //6.2. application group
appTitle = getArg("app.title"); //6.3. application title
return this;
}
/**
* Get a startup argument
*
* @param name argument name
* */
private String getArg(String name) {
return getArg(name, null);
}
/**
* Get a startup argument
*
* @param name argument name
* @param def default value
* */
private String getArg(String name, String def) {
//Try the startup arguments first
String tmp = args.get(name);
if (Utils.isEmpty(tmp)) {
//If empty, fall back to the property configuration
tmp = get("solon." + name);
}
if (Utils.isEmpty(tmp)) {
return def;
} else {
return tmp;
}
}
/**
* Load environment variables
*
* @param keyStarts key prefix to match
* */
public SolonProps loadEnv(String keyStarts) {
System.getenv().forEach((k, v) -> {
if (k.startsWith(keyStarts)) {
putIfAbsent(k, v);
}
});
return this;
}
/**
* Load configuration (for extended loading)
*
* @param url configuration location
*/
public SolonProps loadAdd(URL url) {
if (url != null) {
Properties props = Utils.loadProperties(url);
loadAdd(props);
}
return this;
}
/**
* Load configuration (for extended loading)
*
* @param url configuration location
* */
public SolonProps loadAdd(String url) {
return loadAdd(Utils.getResource(url));
}
/**
* Load configuration (for extended loading)
*
* @param props configuration properties
* */
public SolonProps loadAdd(Properties props) {
if (props != null) {
for (Map.Entry<Object, Object> kv : props.entrySet()) {
Object k1 = kv.getKey();
Object v1 = kv.getValue();
if (k1 instanceof String) {
String key = (String) k1;
if (Utils.isEmpty(key)) {
continue;
}
if (v1 instanceof String) {
// db1.url=xxx
// db1.jdbcUrl=${db1.url}
String tmpV = (String) v1;
if (tmpV.startsWith("${") && tmpV.endsWith("}")) {
String tmpK = tmpV.substring(2, tmpV.length() - 1);
tmpV = props.getProperty(tmpK);
if (tmpV == null) {
tmpV = getProperty(tmpK);
}
v1 = tmpV;
}
}
if (v1 != null) {
System.getProperties().put(kv.getKey(), v1);
put(kv.getKey(), v1);
}
}
}
}
return this;
}
/**
* Load the initial configuration
*
* 1. Prefer system properties, so configuration can be overridden at startup
* 2. Afterwards update both system properties and the solon cfg
* */
protected void loadInit(URL url) {
if (url != null) {
Properties props = Utils.loadProperties(url);
for (Map.Entry kv : System.getProperties().entrySet()) {
if (kv.getKey() instanceof String) {
String key = (String) kv.getKey();
if (Utils.isEmpty(key)) {
continue;
}
if (props.containsKey(key)) {
props.put(key, kv.getValue());
}
}
}
loadAdd(props);
}
}
/**
* Plugin scan
*/
protected void plugsScan(List<ClassLoader> classLoaders) {
for (ClassLoader classLoader : classLoaders) {
//3. Look up plugin configurations (let it throw if anything goes wrong)
ResourceScaner.scan(classLoader, "META-INF/solon", n -> n.endsWith(".properties") || n.endsWith(".yml"))
.stream()
.map(k -> Utils.getResource(classLoader, k))
.forEach(url -> plugsScanMapDo(classLoader, url));
}
//Scan the main configuration
plugsScanLoadDo(JarClassLoader.global(), this);
plugsSort();
}
/**
* Plugin scan based on a specific resource location
*
* @param url resource location
*/
private void plugsScanMapDo(ClassLoader classLoader, URL url) {
Props p = new Props(Utils.loadProperties(url));
plugsScanLoadDo(classLoader, p);
}
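/**
 * Parse the "solon.plugin" entry of a properties set and register the declared plugins with their priority.
 * */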
private void plugsScanLoadDo(ClassLoader classLoader, Props p) {
String pluginStr = p.get("solon.plugin");
if (Utils.isNotEmpty(pluginStr)) {
int priority = p.getInt("solon.plugin.priority", 0);
String[] plugins = pluginStr.trim().split(",");
for (String clzName : plugins) {
if (clzName.length() > 0) {
PluginEntity ent = new PluginEntity(classLoader, clzName.trim());
ent.priority = priority;
plugs.add(ent);
}
}
}
}
private Set<BiConsumer<String, String>> _changeEvent = new HashSet<>();
/**
* Add a change event listener
*/
public void onChange(BiConsumer<String, String> event) {
_changeEvent.add(event);
}
/**
* Set an application property
* */
@Override
public synchronized Object put(Object key, Object value) {
Object obj = super.put(key, value);
if (key instanceof String && value instanceof String) {
_changeEvent.forEach(event -> {
event.accept((String) key, (String) value);
});
}
return obj;
}
/**
* Application source
* */
public Class<?> source() {
return source;
}
/**
* Location of the application source
* */
public URL sourceLocation() {
return sourceLocation;
}
/**
* Get the startup arguments
*/
public NvMap argx() {
return args;
}
/**
* Get the plugin list
*/
public List<PluginEntity> plugs() {
return plugs;
}
/**
* Sort the plugin list
* */
public void plugsSort(){
if (plugs.size() > 0) {
//Sort by priority (descending numeric order)
//
plugs.sort(Comparator.comparingInt(PluginEntity::getPriority).reversed());
}
}
private int serverPort;
/**
* Get the application's main port (default: 8080)
*/
public int serverPort() {
if (serverPort == 0) {
serverPort = getInt("server.port", 8080);
}
return serverPort;
}
private String fileEncoding;
/**
* Get the application file encoding
* */
public String fileEncoding() {
if (fileEncoding == null) {
fileEncoding = get(FILE_ENCODING, FILE_ENCODING_DEF);
}
return fileEncoding;
}
/**
* Extension folder
*/
public String extend() {
return extend;
}
/**
* Extension folder filter (.mysql.,.yml)
*/
public String extendFilter() {
return extendFilter;
}
/**
* Application name
*/
public String appName() {
return appName;
}
/**
* Application group
*/
public String appGroup() {
return appGroup;
}
/**
* Application title
* */
public String appTitle() {
return appTitle;
}
/**
* Framework version
*/
public String version() {
return "1.5.47";
}
/**
* Whether in debug mode
*/
public boolean isDebugMode() {
return isDebugMode;
}
/**
* Whether in setup mode
* */
public boolean isSetupMode(){ return isSetupMode; }
/**
* Whether running in files mode (otherwise package mode)
*/
public boolean isFilesMode() {
return isFilesMode;
}
/**
* Set files mode
* */
public void isFilesMode(boolean isFilesMode) {
this.isFilesMode = isFilesMode;
}
/**
* Whether in drift mode
*/
public boolean isDriftMode() {
return isDriftMode;
}
/**
* Set drift mode
* */
public void isDriftMode(boolean isDriftMode){
this.isDriftMode = isDriftMode;
}
/**
* Whether in white-list mode
*/
public boolean isWhiteMode() {
return isWhiteMode;
}
/**
* Set white-list mode
* */
public void isWhiteMode(boolean isWhiteMode){
this.isWhiteMode = isWhiteMode;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
bootstrap.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import shutil
import sys
import tempfile
from optparse import OptionParser
import os
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
[] |
[] |
[
"bootstrap-testing-find-links"
] |
[]
|
["bootstrap-testing-find-links"]
|
python
| 1 | 0 | |
src/simulation/run_sim.py
|
import settings
import cv2
import mss
import time
import numpy as np
import sys
import os
# make sure this is a system variable in your bashrc
NASA_ULI_ROOT_DIR = os.environ['NASA_ULI_ROOT_DIR']
XPC3_DIR = NASA_ULI_ROOT_DIR + '/src/'
sys.path.append(XPC3_DIR)
import xpc3_helper
import xpc3
def main():
with xpc3.XPlaneConnect() as client:
# Set weather and time of day
client.sendDREF("sim/time/zulu_time_sec", settings.TIME_OF_DAY * 3600 + 8 * 3600)
client.sendDREF("sim/weather/cloud_type[0]", settings.CLOUD_COVER)
# Run the simulation
if settings.DUBINS:
simulate_controller_dubins(client, settings.START_CTE, settings.START_HE,
settings.START_DTP, settings.END_DTP, settings.GET_STATE, settings.GET_CONTROL,
settings.DT, settings.CTRL_EVERY)
else:
simulate_controller(client, settings.START_CTE, settings.START_HE,
settings.START_DTP, settings.END_DTP, settings.GET_STATE, settings.GET_CONTROL)
def simulate_controller(client, startCTE, startHE, startDTP, endDTP, getState, getControl, simSpeed = 1.0):
""" Simulates a controller using the built-in X-Plane 11 dynamics
Args:
client: XPlane Client
startCTE: Starting crosstrack error (meters)
startHE: Starting heading error (degrees)
startDTP: Starting downtrack position (meters)
endDTP: Ending downtrack position (meters)
getState: Function to estimate the current crosstrack and heading errors.
Takes in an XPlane client and returns the crosstrack and
heading error estimates
getControl: Function to perform control based on the state
Takes in an XPlane client, the current crosstrack error estimate,
and the current heading error estimate and returns a control effort
-------------------
simSpeed: increase beyond 1 to speed up simulation
"""
# Reset to the desired starting position
client.sendDREF("sim/time/sim_speed", simSpeed)
xpc3_helper.reset(client, cteInit = startCTE, heInit = startHE, dtpInit = startDTP)
xpc3_helper.sendBrake(client, 0)
time.sleep(5) # 5 seconds to get terminal window out of the way
client.pauseSim(False)
dtp = startDTP
startTime = client.getDREF("sim/time/zulu_time_sec")[0]
endTime = startTime
while dtp < endDTP:
# Deal with speed
speed = xpc3_helper.getSpeed(client)
throttle = 0.1
if speed > 5:
throttle = 0.0
elif speed < 3:
throttle = 0.2
cte, he = getState(client)
rudder = getControl(client, cte, he)
client.sendCTRL([0, rudder, rudder, throttle])
# Wait for next timestep
while endTime - startTime < 1:
endTime = client.getDREF("sim/time/zulu_time_sec")[0]
time.sleep(0.001)
# Set things for next round
startTime = client.getDREF("sim/time/zulu_time_sec")[0]
endTime = startTime
_, dtp, _ = xpc3_helper.getHomeState(client)
time.sleep(0.001)
client.pauseSim(True)
def dynamics(x, y, theta, phi_deg, dt=0.05, v=5, L=5):
""" Dubin's car dynamics model (returns next state)
Args:
x: current crosstrack error (meters)
y: current downtrack position (meters)
theta: current heading error (degrees)
phi_deg: steering angle input (degrees)
-------------------------------
dt: time step (seconds)
v: speed (m/s)
L: distance between front and back wheels (meters)
"""
theta_rad = np.deg2rad(theta)
phi_rad = np.deg2rad(phi_deg)
x_dot = v * np.sin(theta_rad)
y_dot = v * np.cos(theta_rad)
theta_dot = (v / L) * np.tan(phi_rad)
x_prime = x + x_dot * dt
y_prime = y + y_dot * dt
theta_prime = theta + np.rad2deg(theta_dot) * dt
return x_prime, theta_prime, y_prime
def simulate_controller_dubins(client, startCTE, startHE, startDTP, endDTP, getState, getControl,
dt, ctrlEvery, simSpeed=1.0):
""" Simulates a controller, overriding the built-in XPlane-11 dynamics to model the aircraft
as a Dubin's car
Args:
client: XPlane Client
startCTE: Starting crosstrack error (meters)
startHE: Starting heading error (degrees)
startDTP: Starting downtrack position (meters)
endDTP: Ending downtrack position (meters)
getState: Function to estimate the current crosstrack and heading errors.
Takes in an XPlane client and returns the crosstrack and
heading error estimates
getControl: Function to perform control based on the state
Takes in an XPlane client, the current crosstrack error estimate,
and the current heading error estimate and returns a control effort
dt: time step (seconds)
ctrlEvery: Frequency to get new control input
(e.g. if dt=0.5, a value of 20 for ctrlEvery will perform control
at a 1 Hz rate)
-------------------
simSpeed: increase beyond 1 to speed up simulation
"""
# Reset to the desired starting position
client.sendDREF("sim/time/sim_speed", simSpeed)
xpc3_helper.reset(client, cteInit=startCTE,
heInit=startHE, dtpInit=startDTP)
xpc3_helper.sendBrake(client, 0)
time.sleep(5) # 5 seconds to get terminal window out of the way
cte = startCTE
he = startHE
dtp = startDTP
startTime = client.getDREF("sim/time/zulu_time_sec")[0]
endTime = startTime
while dtp < endDTP:
cte_pred, he_pred = getState(client)
phiDeg = getControl(client, cte_pred, he_pred)
for i in range(ctrlEvery):
cte, he, dtp = dynamics(cte, dtp, he, phiDeg, dt)
xpc3_helper.setHomeState(client, cte, dtp, he)
time.sleep(0.03)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"NASA_ULI_ROOT_DIR"
] |
[]
|
["NASA_ULI_ROOT_DIR"]
|
python
| 1 | 0 | |
vendor/github.com/IBM/go-security-plugs/rtplugs/rtplugs.go
|
// The rtplugs package instruments golang http clients that support a RoundTripper interface.
// It was built and tested against https://pkg.go.dev/net/http/httputil#ReverseProxy
package rtplugs
import (
"errors"
"net/http"
"os"
"strings"
"time"
pi "github.com/IBM/go-security-plugs/pluginterfaces"
)
// An http.RoundTripper interface to be used as Transport for http clients
//
// To extend reverseproxy use:
// rt := rtplugs.New(log)
// if rt != nil {
// defer rt.Close()
// reverseproxy.Transport = rt.Transport(reverseproxy.Transport)
// }
//
// While `log` is an optional logger
type RoundTrip struct {
next http.RoundTripper // the next roundtripper
roundTripPlugs []pi.RoundTripPlug // list of activated plugs
}
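// approveRequests passes the request through each active plug's ApproveRequest hook in order; the first error aborts the chain.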
func (rt *RoundTrip) approveRequests(reqin *http.Request) (req *http.Request, err error) {
req = reqin
for _, p := range rt.roundTripPlugs {
start := time.Now()
req, err = p.ApproveRequest(req)
elapsed := time.Since(start)
if err != nil {
pi.Log.Infof("Plug %s: ApproveRequest returned an error %v", p.PlugName(), err)
req = nil
return
}
pi.Log.Debugf("Plug %s: ApproveRequest took %s", p.PlugName(), elapsed.String())
}
return
}
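// nextRoundTrip forwards the approved request to the wrapped RoundTripper and logs how long it took.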
func (rt *RoundTrip) nextRoundTrip(req *http.Request) (resp *http.Response, err error) {
start := time.Now()
pi.Log.Debugf("nextRoundTrip rt.next.RoundTrip started\n")
resp, err = rt.next.RoundTrip(req)
pi.Log.Debugf("nextRoundTrip rt.next.RoundTrip ended\n")
elapsed := time.Since(start)
if err != nil {
pi.Log.Infof("nextRoundTrip (i.e. DefaultTransport) returned an error %v", err)
resp = nil
return
}
pi.Log.Debugf("nextRoundTrip (i.e. DefaultTransport) took %s\n", elapsed.String())
return
}
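// approveResponse passes the response through each active plug's ApproveResponse hook in order.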
func (rt *RoundTrip) approveResponse(req *http.Request, respIn *http.Response) (resp *http.Response, err error) {
resp = respIn
for _, p := range rt.roundTripPlugs {
start := time.Now()
resp, err = p.ApproveResponse(req, resp)
elapsed := time.Since(start)
if err != nil {
pi.Log.Infof("Plug %s: ApproveResponse returned an error %v", p.PlugName(), err)
resp = nil
return
}
pi.Log.Debugf("Plug %s: ApproveResponse took %s", p.PlugName(), elapsed.String())
}
return
}
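// RoundTrip implements http.RoundTripper: it screens the request, forwards it, then screens the response, recovering from any panic raised by a plug.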
func (rt *RoundTrip) RoundTrip(req *http.Request) (resp *http.Response, err error) {
defer func() {
if recovered := recover(); recovered != nil {
pi.Log.Warnf("Recovered from panic during RoundTrip! Recover: %v\n", recovered)
err = errors.New("panicked during RoundTrip")
resp = nil
}
}()
if req, err = rt.approveRequests(req); err == nil {
pi.Log.Debugf("ApproveRequest ended")
if resp, err = rt.nextRoundTrip(req); err == nil {
pi.Log.Debugf("nextRoundTrip ended")
resp, err = rt.approveResponse(req, resp)
pi.Log.Debugf("approveResponse ended")
}
}
return
}
// New(pi.Logger) will attempt to start a list of plugs
//
// env RTPLUGS defines a comma-separated list of plug names
// A typical RTPLUGS value would be "rtplug,wsplug"
// The plugs may be added statically (using imports) or dynamically (.so files)
//
// For dynamically loaded plugs:
// The path of dynamically included plugs should also be defined in RTPLUGS_SO
// env RTPLUGS_SO defines a comma-separated list of .so plug files
// relative/full paths may be used
// A typical RTPLUGS_SO value would be "../../plugs/rtplug,../../plugs/wsplug"
// It is recommended to place the dynamic plugs in a plugs dir of the module.
// This helps ensure that plugs are built with the same package dependencies.
// Only plugs using the exact same package dependencies will be loaded.
func New(l pi.Logger) (rt *RoundTrip) {
// Immediately return nil if RTPLUGS is not set
plugsStr := os.Getenv("RTPLUGS")
if plugsStr == "" {
return
}
// Set logger for the entire RTPLUGS mechanism
if l != nil {
pi.Log = l
}
// Never panic the caller app from here
defer func() {
if r := recover(); r != nil {
pi.Log.Warnf("Recovered from panic during rtplugs.New()! One or more plugs may be skipped. Recover: %v", r)
}
if (rt != nil) && len(rt.roundTripPlugs) == 0 {
rt = nil
}
}()
// load any dynamic plugs
load()
plugs := strings.Split(plugsStr, ",")
pi.Log.Infof("Trying to activate these %d plugs %v", len(plugs), plugs)
pi.Log.Infof("Trying to activate these %d plugs %v", len(pi.RoundTripPlugs), pi.RoundTripPlugs)
for _, plugName := range plugs {
for _, p := range pi.RoundTripPlugs {
pi.Log.Infof("p.PlugName() %s", p.PlugName())
if p.PlugName() == plugName {
// found a loaded plug, lets activate it
p.Init()
if rt == nil {
rt = new(RoundTrip)
}
rt.roundTripPlugs = append(rt.roundTripPlugs, p)
pi.Log.Infof("Plugin %s is activated", plugName)
break
}
}
}
return
}
// Transport() wraps an existing RoundTripper
//
// Once the existing RoundTripper is wrapped, data flowing to and from the
// existing RoundTripper will be screened using the security plugs
func (rt *RoundTrip) Transport(t http.RoundTripper) http.RoundTripper {
if t == nil {
t = http.DefaultTransport
}
rt.next = t
return rt
}
// Close() gracefully shuts down all plugs
//
// Note that Close does not unload the .so files,
// instead, it informs all loaded plugs to gracefully shutdown and cleanup
func (rt *RoundTrip) Close() {
defer func() {
if r := recover(); r != nil {
pi.Log.Warnf("Recovered from panic during ShutdownPlugs!\n\tOne or more plugs may be skipped\n\tRecover: %v", r)
}
pi.Log.Sync()
}()
for _, p := range rt.roundTripPlugs {
p.Shutdown()
}
rt.roundTripPlugs = []pi.RoundTripPlug{}
}
|
[
"\"RTPLUGS\""
] |
[] |
[
"RTPLUGS"
] |
[]
|
["RTPLUGS"]
|
go
| 1 | 0 | |
src/main/java/hudson/plugins/report/jck/main/cmdline/JobsRecognition.java
|
/*
* The MIT License
*
* Copyright 2016 jvanek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.plugins.report.jck.main.cmdline;
import hudson.plugins.report.jck.main.formatters.Formatter;
import hudson.plugins.report.jck.main.formatters.HtmlFormatter;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class JobsRecognition {
private static JobsRecognition INSTANCE;
private static final Map<File, String> cache = new HashMap<>();
public static JobsRecognition jobsRecognition() {
if (INSTANCE == null) {
INSTANCE = new JobsRecognition();
}
return INSTANCE;
}
private String jenkinsDir;
private File jobsDir;
public JobsRecognition() {
jenkinsDir = System.getProperty("jenkins_home");
if (jenkinsDir == null) {
jenkinsDir = System.getenv("JENKINS_HOME");
}
jobsDir = new File(jenkinsDir, "jobs");
}
public File getJobsDir() {
return jobsDir;
}
public String getJenkinsDir() {
return jenkinsDir;
}
public String[] getPossibleJobs() {
String[] possibleJobs = jobsDir.list();
Arrays.sort(possibleJobs, Collections.reverseOrder());
return possibleJobs;
}
public boolean isJob(String jobName) {
return arrayContains(getPossibleJobs(), jobName);
}
private File creteJobFile(String jobName) {
return new File(jobsDir, jobName);
}
private File creteBuildsDir(String jobName) {
return new File(creteJobFile(jobName), "builds");
}
public File creteBuildDir(String jobName, int id) {
return new File(creteBuildsDir(jobName), String.valueOf(id));
}
private File creteLogFile(String jobName, int id) {
return creteLogFile(creteBuildDir(jobName, id));
}
private static File creteLogFile(File dir) {
return new File(dir, "log");
}
public File creteChangelogFile(String jobName, int id) {
return creteChangelogFile(creteBuildDir(jobName, id));
}
private static File creteChangelogFile(File dir) {
return new File(dir, "changelog.xml");
}
public static boolean arrayContains(String[] as, String s) {
for (String a : as) {
if (a.equals(s)) {
return true;
}
}
return false;
}
public int getLatestBuildId(String jobName) {
return getLatestBuildId(creteBuildsDir(jobName));
}
private static int getLatestBuildId(File jobDir) {
if (jobDir.exists() && jobDir.isDirectory()) {
String[] files = jobDir.list();
List<Integer> results = new ArrayList<>(files.length);
for (String file : files) {
try {
Integer i = Integer.valueOf(file);
results.add(i);
} catch (Exception ex) {
System.err.println(jobDir + "/" + file + " is not a number.");
}
}
Collections.sort(results);
return results.get(results.size() - 1);
} else {
throw new RuntimeException(jobDir + " does not exist or is not a directory");
}
}
public void checkJob(String jobName) {
if (!isJob(jobName)) {
System.err.println("Possible jobs");
String[] pj = getPossibleJobs();
for (String jobs : pj) {
System.err.println(jobs);
}
throw new RuntimeException("Unknown job `" + jobName + "`");
}
}
void printJobInfo(String jobName, Formatter formatter) {
checkJob(jobName);
String[] builds = creteBuildsDir(jobName).list();
List<Integer> results = new ArrayList<>(builds.length);
for (String build : builds) {
if (isNumber(build)) {
results.add(Integer.valueOf(build));
}
}
int latest = getLatestBuildId(jobName);
Collections.sort(results);
formatter.initDoc();
for (Integer result : results) {
formatter.small();
File f = creteBuildDir(jobName, result);
if (isUnknown(f)) {
formatter.startColor(Formatter.SupportedColors.Magenta);
} else if (isFailed(f)) {
formatter.startColor(Formatter.SupportedColors.Red);
} else if (isAborted(f)) {
formatter.startColor(Formatter.SupportedColors.LightMagenta);
} else if (isUnstable(f)) {
formatter.startColor(Formatter.SupportedColors.Yellow);
} else if (isPassed(f)) {
formatter.startColor(Formatter.SupportedColors.Green);
} else {
formatter.startColor(Formatter.SupportedColors.Cyan);
}
formatter.print("" + result + "(" + (result - latest) + "): ");
formatter.print("" + getChangelogsNvr(f));
String tt = JobsRecognition.tail(creteLogFile(jobName, result));
if (tt != null) {
//tt = tt.trim();
}
formatter.print(" [" + tt + "]");
if (isUnknown(f)) {
formatter.print(" [unknown status!]");
}
formatter.closeBuildsList();
}
formatter.closeDoc();
}
public static boolean isNumber(String s) {
try {
Integer.valueOf(s);
return true;
} catch (Exception ex) {
return false;
}
}
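/**
 * Reads the build's changelog.xml and returns the first nvr value found outside the rpms section,
 * or null if the file cannot be parsed.
 */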
public static String getChangelogsNvr(File buildPath) {
File f = creteChangelogFile(buildPath);
try {
String content = new Scanner(f).useDelimiter("\\Z").next();
String[] lines = content.split("[<>]");
boolean read1 = true;
boolean read2 = false;
for (String line : lines) {
line = line.replaceAll("\\s+", "");
if (line.isEmpty()) {
continue;
}
if (read1 && read2) {
return line;
}
if (line.equals("rpms")) {
read1 = false;
}
if (line.equals("/rpms")) {
read1 = true;
}
if (line.equals("nvr")) {
read2 = true;
}
if (line.equals("/nvr")) {
read2 = false;
}
}
} catch (Exception ex) {
return null;
}
return null;
}
// maybe Linux only, not a UTF-8-safe solution... not much tested, just copy-pasted and it worked
public static String tail(File file) {
if (!file.exists()) {
return null;
}
if (cache.get(file) != null) {
return cache.get(file);
}
RandomAccessFile fileHandler = null;
try {
fileHandler = new RandomAccessFile(file, "r");
long fileLength = fileHandler.length() - 1;
StringBuilder sb = new StringBuilder();
for (long filePointer = fileLength; filePointer != -1; filePointer--) {
fileHandler.seek(filePointer);
int readByte = fileHandler.readByte();
if (readByte == 0xA) {
if (filePointer == fileLength) {
continue;
}
break;
} else if (readByte == 0xD) {
if (filePointer == fileLength - 1) {
continue;
}
break;
}
sb.append((char) readByte);
}
String lastLine = sb.reverse().toString();
cache.put(file, lastLine);
return lastLine;
} catch (java.io.FileNotFoundException e) {
e.printStackTrace();
return null;
} catch (java.io.IOException e) {
e.printStackTrace();
return null;
} finally {
if (fileHandler != null) {
try {
fileHandler.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
// this is not static on purpose
public boolean shouldBeSkipped(File f) {
if (isUnknown(f)) {
return true;
}
return (isAborted(f) || isFailed(f));
}
private final Pattern ABRTD = Pattern.compile(".*ABORTED.*");
private final Pattern FAILD = Pattern.compile(".*FAILURE.*");
private final Pattern UNSTB = Pattern.compile(".*UNSTABLE.*");
private final Pattern PASSD = Pattern.compile(".*SUCCESS.*");
public boolean isAborted(File f) {
return ABRTD.matcher(tail(creteLogFile(f))).matches();
}
public boolean isFailed(File f) {
return FAILD.matcher(tail(creteLogFile(f))).matches();
}
public boolean isUnstable(File f) {
return UNSTB.matcher(tail(creteLogFile(f))).matches();
}
public boolean isPassed(File f) {
return PASSD.matcher(tail(creteLogFile(f))).matches();
}
public boolean isUnknown(File f) {
if (!f.exists()) {
return true;
}
try {
return !(isAborted(f) || isFailed(f) || isUnstable(f) || isPassed(f));
} catch (Exception ex) {
ex.printStackTrace();
return true;
}
}
}
|
[
"\"JENKINS_HOME\""
] |
[] |
[
"JENKINS_HOME"
] |
[]
|
["JENKINS_HOME"]
|
java
| 1 | 0 | |
giraph-debugger/src/main/java/org/apache/giraph/debugger/utils/DebuggerUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.giraph.debugger.utils;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.giraph.utils.ReflectionUtils;
import org.apache.giraph.utils.WritableUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
/**
 * Contains common utility classes shared by one or more of:
* <ul>
* <li>Graft instrumenter and the
* <li>server that serves data to Graft GUI by talking to HDFS
* <li>Wrapper classes around the scenario protocol buffers that are stored
* under {@link org.apache.giraph.debugger.utils}.
* </ul>
*
* author semihsalihoglu
*/
public class DebuggerUtils {
/**
* The path to the HDFS root for storing Graft traces.
*/
public static final String TRACE_ROOT = System.getProperty(
"giraph.debugger.traceRootAtHDFS",
"/user/" + System.getProperty("user.name") + "/giraph-debug-traces");
/**
* The path to the HDFS root for storing cached Giraph job jars.
*/
public static final String JARCACHE_HDFS = System.getProperty(
"giraph.debugger.jobCacheAtHDFS", TRACE_ROOT + "/jars");
/**
* The path to the local root directory for storing cached Giraph job jars.
*/
public static final String JARCACHE_LOCAL = System.getProperty(
"giraph.debugger.jobCacheLocal", System.getenv("HOME") +
"/.giraph-debug/jars");
/**
* Enumeration of different trace files Graft saves in HDFS.
*/
public enum DebugTrace {
/**
* Regular trace capturing a vertex computation.
*/
VERTEX_REGULAR("regular vertex"),
/**
* Captured exception from a vertex.
*/
VERTEX_EXCEPTION("exception from a vertex"),
/**
* All traces of a particular vertex.
*/
VERTEX_ALL,
/**
* Captured message integrity violations.
*/
INTEGRITY_MESSAGE_ALL("invalid messages"),
/**
* Trace of the single message violating constraints.
*/
INTEGRITY_MESSAGE_SINGLE_VERTEX("vertex sending invalid messages"),
/**
* Trace of the vertex computation that sends an invalid message.
*/
INTEGRITY_VERTEX("vertex having invalid value"),
/**
* Regular trace of a MasterCompute.
*/
MASTER_REGULAR("regular MasterCompute"),
/**
* Trace capturing exception thrown from a MasterCompute.
*/
MASTER_EXCEPTION("exception from MasterCompute"),
/**
* All traces of MasterCompute.
*/
MASTER_ALL,
/**
* The jar signature that links the instrumented jar.
*/
JAR_SIGNATURE;
/**
* The label of this debug trace.
*/
private final String label;
/**
* Creates a DebugTrace instance without a label.
*/
private DebugTrace() {
this.label = null;
}
/**
* Creates a DebugTrace instance with a specific label.
* @param label The label.
*/
private DebugTrace(String label) {
this.label = label;
}
/**
* Returns the label.
* @return the label
*/
public String getLabel() {
return label;
}
}
/**
* File name prefix for regular traces.
*/
public static final String PREFIX_TRACE_REGULAR = "reg";
/**
* File name prefix for exception traces.
*/
public static final String PREFIX_TRACE_EXCEPTION = "err";
/**
* File name prefix for vertex value integrity traces.
*/
public static final String PREFIX_TRACE_VERTEX = "vv";
/**
* File name prefix for message integrity traces.
*/
public static final String PREFIX_TRACE_MESSAGE = "msg";
/**
* Disallows creating instances of this class.
*/
private DebuggerUtils() { }
/**
* Makes a clone of a writable object. Giraph sometimes reuses and overwrites
* the bytes inside {@link Writable} objects. For example, when reading the
* incoming messages inside a {@link Computation} class through the iterator
* Giraph supplies, Giraph uses only one object. Therefore in order to keep a
* pointer to particular object, we need to clone it.
*
* @param <T>
* Type of the clazz.
* @param writableToClone
* Writable object to clone.
* @param clazz
* Class of writableToClone.
* @return a clone of writableToClone.
*/
public static <T extends Writable> T makeCloneOf(T writableToClone,
Class<T> clazz) {
T idCopy = newInstance(clazz);
// Return value is null if clazz is assignable to NullWritable.
if (idCopy == null) {
return writableToClone;
}
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(
byteArrayOutputStream);
try {
writableToClone.write(dataOutputStream);
} catch (IOException e) {
// Throwing a runtime exception because the methods that call other
// methods
// such as addNeighborWrapper or addOutgoingMessageWrapper, implement
// abstract classes
// or interfaces of Giraph that we can't edit to include a throws
// statement.
throw new RuntimeException(e);
}
//
if (byteArrayOutputStream.toByteArray() != null) {
WritableUtils.readFieldsFromByteArray(
byteArrayOutputStream.toByteArray(), idCopy);
byteArrayOutputStream.reset();
}
return idCopy;
}
/**
* Instantiates a new object from the given class.
*
* @param <T> The type of the new instance to create.
* @param theClass The class of the new instance to create.
* @return The newly created instance.
*/
public static <T> T newInstance(Class<T> theClass) {
return NullWritable.class.isAssignableFrom(theClass) ? null :
ReflectionUtils.newInstance(theClass);
}
/**
* Returns the full trace file name for the given type of debug trace. One or
* more of the passed arguments will be used in the file name.
*
* @param debugTrace The debug trace for generating the file name.
* @param jobId The job id of the job the debug trace belongs to.
* @param superstepNo The superstep number of the debug trace.
* @param vertexId The vertex id of the debug trace.
* @param taskId The task id of the debug trace.
* @return The full trace file name.
*/
public static String getFullTraceFileName(DebugTrace debugTrace,
String jobId, Long superstepNo, String vertexId, String taskId) {
return getTraceFileRoot(jobId) + "/" +
getTraceFileName(debugTrace, superstepNo, vertexId, taskId);
}
/**
* A convenience method around
 * {@link #getFullTraceFileName(DebugTrace, String, Long, String, String)}.
*
* @param superstepNo The superstep number of the trace.
* @param jobId The job id of the trace.
* @param taskId The task id of the trace.
* @return The full trace file name for debug trace of message integrity.
*/
public static String getMessageIntegrityAllTraceFullFileName(
long superstepNo, String jobId, String taskId) {
return getFullTraceFileName(DebugTrace.INTEGRITY_MESSAGE_ALL, jobId,
superstepNo, null /* no vertex Id */, taskId);
}
/**
* A convenience method around
 * {@link #getFullTraceFileName(DebugTrace, String, Long, String, String)}.
*
* @param masterDebugTrace The debug trace for generating the file name.
* @param jobId The job id the debug trace belongs to.
* @param superstepNo The superstep number.
* @return The full trace file name of the master compute trace.
*/
public static String getFullMasterTraceFileName(DebugTrace masterDebugTrace,
String jobId, Long superstepNo) {
return getFullTraceFileName(masterDebugTrace, jobId, superstepNo,
null /* no vertex Id */, null /* no trace Id */);
}
/**
* A convenience method around
 * {@link #getFullTraceFileName(DebugTrace, String, Long, String, String)}.
*
* @param debugTrace The debug trace for generating the file name.
* @param jobId The job id the debug trace belongs to.
* @param superstepNo The superstep number.
* @param vertexId The vertex id of the debug trace.
* @return The full trace file name without the trace id.
*/
public static String getFullTraceFileName(DebugTrace debugTrace,
String jobId, Long superstepNo, String vertexId) {
return getFullTraceFileName(debugTrace, jobId, superstepNo, vertexId,
null /* no trace Id */);
}
/**
* Maps debug trace to file names with additional parameters.
*
* @param debugTrace The debug trace.
* @param superstepNo The superstep number.
* @param vertexId The vertex id.
* @param taskId The task id.
* @return The file name that corresponds to the debug trace.
*/
private static String getTraceFileName(DebugTrace debugTrace,
Long superstepNo, String vertexId, String taskId) {
String format = getTraceFileFormat(debugTrace);
switch (debugTrace) {
case VERTEX_REGULAR:
return String.format(format, superstepNo, vertexId);
case VERTEX_EXCEPTION:
return String.format(format, superstepNo, vertexId);
case INTEGRITY_MESSAGE_ALL:
return String.format(format, taskId, superstepNo);
case INTEGRITY_MESSAGE_SINGLE_VERTEX:
return String.format(format, superstepNo, vertexId);
case INTEGRITY_VERTEX:
return String.format(format, superstepNo, vertexId);
case MASTER_REGULAR:
return String.format(format, superstepNo);
case MASTER_EXCEPTION:
return String.format(format, superstepNo);
default:
return null;
}
}
/**
* Returns the file name format for the given debug trace. The returned format is
* later filled in with the superstep number, vertex id, and/or task id as needed.
*
* @param debugTrace
* The debug trace.
* @return The file name format for the debug trace to be used with
* {@link String#format(String, Object...)}.
*/
public static String getTraceFileFormat(DebugTrace debugTrace) {
// XXX is this function giving the String format? or regex? Seems latter.
switch (debugTrace) {
case VERTEX_REGULAR:
return PREFIX_TRACE_REGULAR + "_stp_%s_vid_%s.tr";
case VERTEX_EXCEPTION:
return PREFIX_TRACE_EXCEPTION + "_stp_%s_vid_%s.tr";
case VERTEX_ALL:
return String.format("(%s|%s)%s", PREFIX_TRACE_REGULAR,
PREFIX_TRACE_EXCEPTION, "_stp_%s_vid_%s.tr");
case INTEGRITY_MESSAGE_ALL:
return "task_%s_msg_intgrty_stp_%s.tr";
case INTEGRITY_MESSAGE_SINGLE_VERTEX:
return PREFIX_TRACE_MESSAGE + "_intgrty_stp_%s_vid_%s.tr";
case INTEGRITY_VERTEX:
return PREFIX_TRACE_VERTEX + "_intgrty_stp_%s_vid_%s.tr";
case MASTER_REGULAR:
return "master_" + PREFIX_TRACE_REGULAR + "_stp_%s.tr";
case MASTER_EXCEPTION:
return "master_" + PREFIX_TRACE_EXCEPTION + "_stp_%s.tr";
case MASTER_ALL:
return String.format("master_(%s|%s)_%s", PREFIX_TRACE_REGULAR,
PREFIX_TRACE_EXCEPTION, "_stp_%s.tr");
default:
throw new IllegalArgumentException("DebugTrace not supported.");
}
}
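// Illustrative example, not from the original source: for VERTEX_REGULAR with
// superstep 3 and vertex id "v12", getTraceFileName() fills the template above into
// PREFIX_TRACE_REGULAR + "_stp_3_vid_v12.tr". The *_ALL entries look like regex-style
// patterns (note the alternation), presumably for matching existing trace files
// rather than naming new ones.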
/**
* Maps prefix back to the corresponding debug trace.
*
* @param prefix The file name prefix.
* @return The debug trace value that corresponds to given prefix.
* @throws IllegalArgumentException Thrown if prefix isn't supported.
*/
public static DebugTrace getVertexDebugTraceForPrefix(String prefix) {
if (prefix.equals(PREFIX_TRACE_REGULAR)) {
return DebugTrace.VERTEX_REGULAR;
} else if (prefix.equals(PREFIX_TRACE_EXCEPTION)) {
return DebugTrace.VERTEX_EXCEPTION;
} else if (prefix.equals(PREFIX_TRACE_VERTEX)) {
return DebugTrace.INTEGRITY_VERTEX;
} else if (prefix.equals(PREFIX_TRACE_MESSAGE)) {
return DebugTrace.INTEGRITY_MESSAGE_SINGLE_VERTEX;
} else {
throw new IllegalArgumentException("Prefix not supported");
}
}
/**
* Returns the root directory of the trace files for the given job.
*
* @param jobId The job id of the job.
* @return The root path for storing traces for the job.
*/
public static String getTraceFileRoot(String jobId) {
return String.format("%s/%s", DebuggerUtils.TRACE_ROOT, jobId);
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
java
| 1 | 0 | |
cosmos-backend/filesystem/artifact.go
|
package filesystem
import (
"cosmos"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
)
var json = jsoniter.ConfigDefault
var _ cosmos.ArtifactService = (*ArtifactService)(nil)
type ArtifactService struct {
}
func NewArtifactService() *ArtifactService {
return &ArtifactService{}
}
func (s *ArtifactService) GetArtifactory(syncID int, executionDate time.Time) (*cosmos.Artifactory, error) {
path := filepath.Join(
cosmos.ArtifactDir,
strconv.Itoa(syncID),
executionDate.Format(time.RFC3339),
)
if err := os.MkdirAll(path, 0777); err != nil {
return nil, err
}
return &cosmos.Artifactory{Path: path}, nil
}
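// Illustrative layout, not from the original source: assuming cosmos.ArtifactDir is
// "/artifacts", sync id 42 with an execution date of 2021-06-01T12:00:00Z would give
// an artifactory path of "/artifacts/42/2021-06-01T12:00:00Z".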
func (s *ArtifactService) GetArtifactRef(artifactory *cosmos.Artifactory, id int, attempt int32) (*log.Logger, error) {
var err error
artifactory.Once[id].Do(func() {
var file *os.File
file, err = os.OpenFile(filepath.Join(artifactory.Path, cosmos.ArtifactNames[id]), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return
}
artifactory.Artifacts[id] = log.New(file, fmt.Sprintf("[Attempt %3d] ", attempt), log.LstdFlags)
})
if err != nil {
return nil, err
}
if artifactory.Artifacts[id] == nil {
return nil, fmt.Errorf("artifact for %s is unavailable", cosmos.ArtifactNames[id])
}
return artifactory.Artifacts[id], nil
}
func (s *ArtifactService) WriteArtifact(artifactory *cosmos.Artifactory, id int, contents interface{}) error {
if reflect.ValueOf(contents).IsNil() {
return nil
}
file, err := os.Create(filepath.Join(artifactory.Path, cosmos.ArtifactNames[id]))
if err != nil {
return err
}
defer file.Close()
b, err := json.Marshal(contents)
if err != nil {
return err
}
_, err = file.Write(b)
if err != nil {
return err
}
return nil
}
func (s *ArtifactService) GetArtifactPath(artifactory *cosmos.Artifactory, id int) *string {
path := filepath.Join(artifactory.Path, cosmos.ArtifactNames[id])
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil
}
// For Docker-in-Docker, we have to return the path as it would be on the host.
path = strings.TrimPrefix(path, cosmos.ArtifactDir)
path = filepath.Join(os.Getenv("ARTIFACT_DIR"), path)
return &path
}
func (s *ArtifactService) GetArtifactData(artifactory *cosmos.Artifactory, id int) ([]byte, error) {
path := filepath.Join(artifactory.Path, cosmos.ArtifactNames[id])
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return nil, cosmos.Errorf(cosmos.ENOTFOUND, "Requested artifact does not exist")
}
return nil, err
}
return ioutil.ReadFile(path)
}
func (s *ArtifactService) CloseArtifactory(artifactory *cosmos.Artifactory) {
for _, artifact := range artifactory.Artifacts {
if artifact != nil {
artifact.Writer().(*os.File).Close()
}
}
}
|
[
"\"ARTIFACT_DIR\""
] |
[] |
[
"ARTIFACT_DIR"
] |
[]
|
["ARTIFACT_DIR"]
|
go
| 1 | 0 | |
third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Crosstool wrapper for compiling CUDA programs with nvcc on Windows.
DESCRIPTION:
This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc
"""
from __future__ import print_function
from argparse import ArgumentParser
import os
import subprocess
import re
import sys
import pipes
# Template values set by cuda_autoconf.
CPU_COMPILER = ('/dt7/usr/bin/gcc')
GCC_HOST_COMPILER_PATH = ('/dt7/usr/bin/gcc')
NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc'
NVCC_VERSION = '10.0'
NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir"
supported_cuda_compute_capabilities = [ "3.0", "6.0" ]
def Log(s):
print('gpus/crosstool: {0}'.format(s))
def GetOptionValue(argv, option):
"""Extract the list of values for option from options.
Args:
option: The option whose value to extract, without the leading '/'.
Returns:
1. A list of values, either directly following the option,
(eg., /opt val1 val2) or values collected from multiple occurrences of
the option (eg., /opt val1 /opt val2).
2. The leftover options.
"""
parser = ArgumentParser(prefix_chars='/')
parser.add_argument('/' + option, nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args and vars(args)[option]:
return (sum(vars(args)[option], []), leftover)
return ([], leftover)
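# Illustrative example, not part of the original source: with
# argv = ['/D', 'FOO=1', '/D', 'BAR', '/nologo'] and option = 'D',
# GetOptionValue is expected to return (['FOO=1', 'BAR'], ['/nologo']).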
def _update_options(nvcc_options):
if NVCC_VERSION in ("7.0",):
return nvcc_options
update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" }
return [ update_options[opt] if opt in update_options else opt
for opt in nvcc_options ]
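# Illustrative example, not part of the original source: for NVCC 10.0,
# _update_options(['relaxed-constexpr', 'ftz=true']) is expected to return
# ['expt-relaxed-constexpr', 'ftz=true'].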
def GetNvccOptions(argv):
"""Collect the -nvcc_options values from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
Returns:
1. The string that can be passed directly to nvcc.
2. The leftover options.
"""
parser = ArgumentParser()
parser.add_argument('-nvcc_options', nargs='*', action='append')
args, leftover = parser.parse_known_args(argv)
if args.nvcc_options:
options = _update_options(sum(args.nvcc_options, []))
return (['--' + a for a in options], leftover)
return ([], leftover)
def InvokeNvcc(argv, log=False):
"""Call nvcc with arguments assembled from argv.
Args:
argv: A list of strings, possibly the argv passed to main().
log: True if logging is requested.
Returns:
The return value of calling os.system('nvcc ' + args)
"""
src_files = [f for f in argv if
re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
if len(src_files) == 0:
raise RuntimeError('No source files found for cuda compilation.')
out_file = [ f for f in argv if f.startswith('/Fo') ]
if len(out_file) != 1:
raise RuntimeError('Please specify exactly one output file for cuda compilation.')
out = ['-o', out_file[0][len('/Fo'):]]
nvcc_compiler_options, argv = GetNvccOptions(argv)
opt_option, argv = GetOptionValue(argv, 'O')
opt = ['-g', '-G']
if (len(opt_option) > 0 and opt_option[0] != 'd'):
opt = ['-O2']
include_options, argv = GetOptionValue(argv, 'I')
includes = ["-I " + include for include in include_options]
defines, argv = GetOptionValue(argv, 'D')
defines = ['-D' + define for define in defines]
undefines, argv = GetOptionValue(argv, 'U')
undefines = ['-U' + define for define in undefines]
# The rest of the unrecognized options should be passed to the host compiler
host_compiler_options = [option for option in argv if option not in (src_files + out_file)]
m_options = ["-m64"]
nvccopts = ['-D_FORCE_INLINES']
for capability in supported_cuda_compute_capabilities:
capability = capability.replace('.', '')
nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % (
capability, capability, capability)]
nvccopts += nvcc_compiler_options
nvccopts += undefines
nvccopts += defines
nvccopts += m_options
nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"']
nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files
# If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP
# Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check
# http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver
# Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists.
if os.path.isfile(NVCC_TEMP_DIR):
os.remove(NVCC_TEMP_DIR)
if not os.path.exists(NVCC_TEMP_DIR):
os.makedirs(NVCC_TEMP_DIR)
nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR]
cmd = [NVCC_PATH] + nvccopts
if log:
Log(cmd)
proc = subprocess.Popen(cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=os.environ.copy(),
shell=True)
proc.wait()
return proc.returncode
def main():
parser = ArgumentParser()
parser.add_argument('-x', nargs=1)
parser.add_argument('--cuda_log', action='store_true')
args, leftover = parser.parse_known_args(sys.argv[1:])
if args.x and args.x[0] == 'cuda':
if args.cuda_log: Log('-x cuda')
leftover = [pipes.quote(s) for s in leftover]
if args.cuda_log: Log('using nvcc')
return InvokeNvcc(leftover, log=args.cuda_log)
# Strip our flags before passing through to the CPU compiler for files which
# are not -x cuda. We can't just pass 'leftover' because it also strips -x.
# We not only want to pass -x to the CPU compiler, but also keep it in its
# relative location in the argv list (the compiler is actually sensitive to
# this).
cpu_compiler_flags = [flag for flag in sys.argv[1:]
if not flag.startswith(('--cuda_log'))
and not flag.startswith(('-nvcc_options'))]
return subprocess.call([CPU_COMPILER] + cpu_compiler_flags)
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
qa/rpc-tests/maxuploadtarget.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("DMND", "dolmend"),
help="dolmend binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them come close to the 1MB block limit
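# (Rough arithmetic, for illustration: 14 * ~66 KB is roughly 924 KB, just under
# the 999000-byte -blockmaxsize configured in setup_network above.)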
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after accounting for a fee,
# and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
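# (Worked numbers, assuming MAX_BLOCK_SIZE is 1,000,000 bytes as in the upstream
# test framework: 200*1024*1024 is ~209.7 MB; subtracting the 144 MB daily buffer
# leaves ~65 MB, i.e. roughly 70 requests for a ~900 KB block.)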
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
[] |
[] |
[
"DMND"
] |
[]
|
["DMND"]
|
python
| 1 | 0 | |
sets/generator/nopattern/output.py
|
import base64
import simplejson as json
import os
import pprint
from io import BytesIO
from IPython.core.display import display, HTML
from matplotlib import pyplot as plt
from optimus.infer import is_str
def output_image(fig, path):
"""
Output a figure to a png file
:param fig: Matplotlib figure
:param path: Output file path for the png
:return: None; the figure is saved to the given path
"""
fig.savefig(path, format='png')
plt.close()
def output_base64(fig):
"""
Encode a matplotlib figure as a base64 png
:param fig: Matplotlib figure
:return: Base64-encoded image string
"""
fig_file = BytesIO()
plt.savefig(fig_file, format='png')
# rewind to beginning of file
fig_file.seek(0)
fig_png = base64.b64encode(fig_file.getvalue())
plt.close(fig)
return fig_png.decode('utf8')
def print_html(html):
"""
Display() helper to print html code
:param html: html code to be printed
:return:
"""
try:
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
displayHTML(html)
else:
display(HTML(html))
return True
except NameError:
return False
def print_json(value):
"""
Print a human readable json
:param value: json to be printed
:return: json
"""
pp = pprint.PrettyPrinter(indent=2)
if is_str(value):
value = value.replace("'", "\"")
value = json.loads(value)
pp.pprint(value)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/3d/subduction/viz/plot_faultdir.py
|
#!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the fault
# surfaces, colored by the magnitude of fault slip.
#
# This Python script runs using pvpython or within the ParaView Python
# shell.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step02",
"FIELD": "normal_dir",
"FAULTS": ["fault-slab"],
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read domain data
filename = os.path.join(parameters.output_dir, "%s-domain.xmf" % parameters.sim)
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
dataDomain = XDMFReader(FileNames=[filename])
RenameSource("%s-domain" % parameters.sim, dataDomain)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
view = GetActiveViewOrCreate('RenderView')
# Gray wireframe for undeformed domain.
domainDisplay = Show(dataDomain, view)
domainDisplay.Representation = 'Wireframe'
domainDisplay.AmbientColor = [0.5, 0.5, 0.5]
# Read fault data
dataFaults = []
for fault in parameters.faults:
filename = os.path.join(parameters.output_dir, "%s-%s_info.xmf" % (parameters.sim, fault))
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
data = XDMFReader(FileNames=[filename])
RenameSource("%s-%s" % (parameters.sim, fault), data)
dataFaults.append(data)
groupFaults = GroupDatasets(Input=dataFaults)
faultDisplay = Show(groupFaults, view)
faultDisplay.SetRepresentationType('Surface With Edges')
faultDisplayProperties = GetDisplayProperties(groupFaults, view=view)
faultDisplayProperties.DiffuseColor = [0.25, 0.25, 1.0]
# Add arrows to show displacement vectors.
glyph = Glyph(Input=groupFaults, GlyphType="Arrow")
glyph.Vectors = ["POINTS", parameters.field]
glyph.GlyphMode = "All Points"
glyphDisplay = Show(glyph, view)
glyphDisplay.Representation = "Surface"
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "FIELD", "FAULTS")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", action="store", dest="output_dir", default=DEFAULTS["OUTPUT_DIR"])
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--faults", action="store", dest="faults")
parser.add_argument("--field", action="store", dest="field", default=DEFAULTS["FIELD"])
args = parser.parse_args()
if args.faults:
args.faults = args.faults.split(",")
else:
args.faults = DEFAULTS["FAULTS"]
visualize(args)
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
deepfence_backend/websocket_api/scope_websocket_client/helper.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"os"
"os/exec"
"reflect"
"strconv"
"time"
"github.com/gomodule/redigo/redis"
)
const (
ScopeWsScheme = "ws"
ScopeBaseUrl = "deepfence-topology:8004"
NodeTypeHost = "host"
NodeTypeContainer = "container"
NodeTypeProcess = "process"
NodeTypeContainerImage = "container_image"
NodeTypeContainerByName = "container_by_name"
NodeTypeProcessByName = "process_by_name"
NodeTypePod = "pod"
NodeTypeKubeController = "kube_controller"
NodeTypeKubeService = "kube_service"
NodeTypeSwarmService = "swarm_service"
TopologyIdContainer = "containers"
TopologyIdContainerImage = "containers-by-image"
TopologyIdContainerByName = "containers-by-hostname"
TopologyIdProcess = "processes"
TopologyIdProcessByName = "processes-by-name"
TopologyIdHost = "hosts"
TopologyIdPod = "pods"
TopologyIdKubeController = "kube-controllers"
TopologyIdKubeService = "services"
TopologyIdSwarmService = "swarm-services"
noOfHostsRedisKey = "x-hosts"
dfIdToScopeIdRedisKeyPrefix = "DF_ID_TO_SCOPE_ID_"
ScopeTopologyCount = "TOPOLOGY_COUNT"
TopologyFilterPrefix = "TOPOLOGY_FILTERS_"
topologyHostsProbeMapRedisKey = "TOPOLOGY_HOSTS_PROBE_MAP"
topologyLocalNetworksRedisKey = "TOPOLOGY_LOCAL_NETWORKS"
topologyLocalNetworksK8sRedisKey = "TOPOLOGY_LOCAL_NETWORKS_K8S"
topologyLocalServicesK8sRedisKey = "TOPOLOGY_LOCAL_SERVICES_K8S"
countOfHostsByUserKey = "TOPOLOGY_USER_HOST_COUNT_MAP"
TopologyFormatDeepfence = "deepfence"
TopologyFormatScope = "scope"
RedisExpiryTime = 180 // 3 minutes
kubeSystemNamespace = "kube-system"
KubePublicNamespace = "kube-public"
dockerStateCreated = "created"
dockerStateDead = "dead"
dockerStateExited = "exited"
dockerStatePaused = "paused"
dockerStateRestarting = "restarting"
dockerStateRunning = "running"
dockerStateDeleted = "deleted"
dockerStateRemoving = "removing"
dockerStateUp = "up"
dockerStateStopped = "stopped"
filterTypeStr = "string"
filterTypeNumber = "number"
filterTypeBool = "bool"
cveScanLogsEsIndex = "cve-scan"
secretScanLogsEsIndex = "secret-scan-logs"
scanStatusNeverScanned = "never_scanned"
esAggsSize = 50000
)
var (
ScopeWebSocketUrl map[string]url.URL
TopologyIdNodeTypeMap map[string]string
RedisAddr string
AllNodeTypes []string
statusMap map[string]string
)
func init() {
AllNodeTypes = []string{NodeTypeHost, NodeTypeContainer, NodeTypeContainerByName, NodeTypeContainerImage, NodeTypeProcess,
NodeTypeProcessByName, NodeTypePod, NodeTypeKubeController, NodeTypeKubeService, NodeTypeSwarmService}
statusMap = map[string]string{
"QUEUED": "queued", "STARTED": "in_progress", "SCAN_IN_PROGRESS": "in_progress", "WARN": "in_progress",
"COMPLETED": "complete", "ERROR": "error", "STOPPED": "error", "GENERATING_SBOM": "in_progress",
"GENERATED_SBOM": "in_progress", "IN_PROGRESS": "in_progress", "COMPLETE": "complete"}
RedisAddr = fmt.Sprintf("%s:%s", os.Getenv("REDIS_HOST"), os.Getenv("REDIS_PORT"))
ScopeWebSocketUrl = map[string]url.URL{
NodeTypeHost: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/hosts/ws", RawQuery: "t=5s"},
NodeTypeContainer: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/containers/ws", RawQuery: "system=application&stopped=both&pseudo=show&t=5s"},
NodeTypeContainerByName: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/containers-by-hostname/ws", RawQuery: "system=application&stopped=both&pseudo=show&t=5s"},
NodeTypeContainerImage: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/containers-by-image/ws", RawQuery: "system=application&stopped=both&pseudo=show&t=5s"},
NodeTypeProcess: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/processes/ws", RawQuery: "unconnected=show&t=5s"},
NodeTypeProcessByName: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/processes-by-name/ws", RawQuery: "unconnected=show&t=5s"},
NodeTypePod: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/pods/ws", RawQuery: "snapshot=hide&storage=hide&pseudo=show&namespace=&t=5s"},
NodeTypeKubeController: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/kube-controllers/ws", RawQuery: "pseudo=show&namespace=&t=5s"},
NodeTypeKubeService: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/services/ws", RawQuery: "pseudo=show&namespace=&t=5s"},
NodeTypeSwarmService: {Scheme: ScopeWsScheme, Host: ScopeBaseUrl, Path: "/topology-api/topology/swarm-services/ws", RawQuery: "pseudo=show&namespace=&t=5s"},
}
TopologyIdNodeTypeMap = map[string]string{
TopologyIdPod: NodeTypePod,
TopologyIdContainer: NodeTypeContainer,
TopologyIdContainerByName: NodeTypeContainerByName,
TopologyIdContainerImage: NodeTypeContainerImage,
TopologyIdHost: NodeTypeHost,
TopologyIdKubeController: NodeTypeKubeController,
TopologyIdKubeService: NodeTypeKubeService,
TopologyIdProcess: NodeTypeProcess,
TopologyIdProcessByName: NodeTypeProcessByName,
TopologyIdSwarmService: NodeTypeSwarmService,
}
}
func newRedisPool() (*redis.Pool, int) {
var dbNumInt int
var errVal error
dbNumStr := os.Getenv("REDIS_DB_NUMBER")
if dbNumStr == "" {
dbNumInt = 0
} else {
dbNumInt, errVal = strconv.Atoi(dbNumStr)
if errVal != nil {
dbNumInt = 0
}
}
return &redis.Pool{
MaxIdle: 10,
MaxActive: 30, // max number of connections
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", RedisAddr, redis.DialDatabase(dbNumInt))
if err != nil {
return nil, err
}
return c, err
},
}, dbNumInt
}
func uniqueSlice(strSlice []string) []string {
keys := make(map[string]bool)
var list []string
for _, entry := range strSlice {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
func InArray(val interface{}, array interface{}) bool {
switch reflect.TypeOf(array).Kind() {
case reflect.Slice:
s := reflect.ValueOf(array)
for i := 0; i < s.Len(); i++ {
if reflect.DeepEqual(val, s.Index(i).Interface()) {
return true
}
}
}
return false
}
func ExecuteCommandInBackground(commandStr string) {
cmd := exec.Command("/bin/sh", "-c", commandStr)
err := cmd.Start()
if err != nil {
return
}
err = WaitFunction(cmd)
if err != nil {
return
}
}
func WaitFunction(command *exec.Cmd) error {
err := command.Wait()
if err != nil {
return err
}
return nil
}
func JsonEncode(data interface{}) ([]byte, error) {
buf := new(bytes.Buffer)
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
err := enc.Encode(data)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (topologyOptions *TopologyOptions) TopologyOptionsValidate() {
// container?format=deepfence&pseudo=show&stopped=both
topologyParams := topologyOptions.Params
// format = deepfence | scope
if topologyParams.Format != TopologyFormatDeepfence && topologyParams.Format != TopologyFormatScope {
topologyParams.Format = TopologyFormatDeepfence
}
// pseudo = show | hide
if topologyParams.Pseudo != "show" && topologyParams.Pseudo != "hide" {
topologyParams.Pseudo = "show"
}
// stopped = both | running | stopped
if topologyParams.Stopped != "stopped" && topologyParams.Stopped != "running" && topologyParams.Stopped != "both" {
topologyParams.Stopped = "both"
}
// unconnected = show | hide
if topologyParams.Unconnected != "show" && topologyParams.Unconnected != "hide" {
topologyParams.Unconnected = "show"
}
if topologyOptions.NodeType == NodeTypeHost {
topologyOptions.Channel = fmt.Sprintf("%s?format=%s", topologyOptions.NodeType, topologyParams.Format)
} else if topologyOptions.NodeType == NodeTypeContainer || topologyOptions.NodeType == NodeTypeContainerByName || topologyOptions.NodeType == NodeTypeContainerImage {
topologyOptions.Channel = fmt.Sprintf("%s?stopped=%s&pseudo=%s&format=%s", topologyOptions.NodeType, topologyParams.Stopped, topologyParams.Pseudo, topologyParams.Format)
} else if topologyOptions.NodeType == NodeTypeProcess || topologyOptions.NodeType == NodeTypeProcessByName {
topologyOptions.Channel = fmt.Sprintf("%s?unconnected=%s&format=%s", topologyOptions.NodeType, topologyParams.Unconnected, topologyParams.Format)
} else if topologyOptions.NodeType == NodeTypePod {
topologyOptions.Channel = fmt.Sprintf("%s?namespace=%s&pseudo=%s&format=%s", topologyOptions.NodeType, topologyParams.Namespace, topologyParams.Pseudo, topologyParams.Format)
} else if topologyOptions.NodeType == NodeTypeKubeController || topologyOptions.NodeType == NodeTypeKubeService || topologyOptions.NodeType == NodeTypeSwarmService {
topologyOptions.Channel = fmt.Sprintf("%s?namespace=%s&pseudo=%s&format=%s", topologyOptions.NodeType, topologyParams.Namespace, topologyParams.Pseudo, topologyParams.Format)
} else {
topologyOptions.Channel = ""
}
}
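// Illustrative example, not from the original source: for NodeType "container" with
// zero-value params, TopologyOptionsValidate yields a Channel of
// "container?stopped=both&pseudo=show&format=deepfence".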
func GracefulExit() {
time.Sleep(time.Second * 5)
os.Exit(1)
}
func FetchTopologyData(redisConn redis.Conn, channel string) ([]byte, error) {
var data []byte
data, err := redis.Bytes(redisConn.Do("GET", channel))
if err != nil {
return data, err
}
return data, nil
}
type TopologyParams struct {
Format string `json:"format,omitempty"`
Stopped string `json:"stopped,omitempty"`
Pseudo string `json:"pseudo,omitempty"`
Unconnected string `json:"unconnected,omitempty"`
Namespace string `json:"namespace,omitempty"`
}
type TopologyOptions struct {
NodeType string `json:"node_type"`
Params TopologyParams `json:"params"`
Channel string `json:"channel"`
}
type MetricSample struct {
Timestamp time.Time `json:"date,omitempty"`
Value float64 `json:"value,omitempty"`
}
type Metric struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
Format string `json:"format,omitempty"`
Group string `json:"group,omitempty"`
Value float64 `json:"value,omitempty"`
ValueEmpty bool `json:"valueEmpty,omitempty"`
Priority float64 `json:"priority,omitempty"`
Samples []MetricSample `json:"samples"`
Min float64 `json:"min,omitempty"`
Max float64 `json:"max,omitempty"`
First time.Time `json:"first,omitempty"`
Last time.Time `json:"last,omitempty"`
URL string `json:"url,omitempty"`
}
type TableRow struct {
ID string `json:"id,omitempty"`
Entries map[string]string `json:"entries,omitempty"`
}
type TableColumn struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
DataType string `json:"dataType,omitempty"`
}
type TopologyTable struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
Type string `json:"type,omitempty"`
Columns []TableColumn `json:"columns,omitempty"`
Rows []TableRow `json:"rows,omitempty"`
TruncationCount int `json:"truncationCount,omitempty"`
}
type ScopeMetadata struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
Value string `json:"value,omitempty"`
Priority float64 `json:"priority,omitempty"`
DataType string `json:"dataType,omitempty"`
Truncate int `json:"truncate,omitempty"`
}
type Parent struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
TopologyID string `json:"topologyId,omitempty"`
}
type ParentNode struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
Type string `json:"type"`
}
type ScopeTopology struct {
ID string `json:"id,omitempty"`
Label string `json:"label,omitempty"`
LabelMinor string `json:"labelMinor,omitempty"`
Rank string `json:"rank,omitempty"`
Shape string `json:"shape,omitempty"`
Metadata []ScopeMetadata `json:"metadata,omitempty"`
Parents []Parent `json:"parents,omitempty"`
Metrics []Metric `json:"metrics,omitempty"`
Tables []TopologyTable `json:"tables,omitempty"`
Adjacency []string `json:"adjacency,omitempty"`
Pseudo bool `json:"pseudo"`
}
type CloudMetadata struct {
CloudProvider string `json:"cloud_provider"`
InstanceID string `json:"instance_id,omitempty"`
PublicIP []string `json:"public_ip"`
PrivateIP []string `json:"private_ip"`
InstanceType string `json:"instance_type,omitempty"`
AvailabilityZone string `json:"availability_zone,omitempty"`
Hostname string `json:"hostname,omitempty"`
KernelId string `json:"kernel_id,omitempty"`
ID string `json:"id,omitempty"`
DataCenter string `json:"data_center,omitempty"`
Domain string `json:"domain,omitempty"`
Zone string `json:"zone,omitempty"`
Name string `json:"name,omitempty"`
MachineType string `json:"machine_type,omitempty"`
VmID string `json:"vm_id,omitempty"`
VMSize string `json:"vm_size,omitempty"`
Location string `json:"location,omitempty"`
OsType string `json:"os_type,omitempty"`
SKU string `json:"sku,omitempty"`
ResourceGroupName string `json:"resource_group_name,omitempty"`
}
type TopologyStatistics struct {
HideIfEmpty bool `json:"hide_if_empty"`
Name string `json:"name"`
Options []struct {
DefaultValue string `json:"defaultValue"`
ID string `json:"id"`
Options []struct {
Label string `json:"label"`
Value string `json:"value"`
} `json:"options"`
} `json:"options"`
Rank int `json:"rank"`
Stats struct {
EdgeCount int `json:"edge_count"`
FilteredNodes int `json:"filtered_nodes"`
NodeCount int `json:"node_count"`
NonpseudoNodeCount int `json:"nonpseudo_node_count"`
} `json:"stats"`
SubTopologies []struct {
HideIfEmpty bool `json:"hide_if_empty"`
Name string `json:"name"`
Options []struct {
DefaultValue string `json:"defaultValue"`
ID string `json:"id"`
Options []struct {
Label string `json:"label"`
Value string `json:"value"`
} `json:"options"`
} `json:"options"`
Rank int `json:"rank"`
Stats struct {
EdgeCount int `json:"edge_count"`
FilteredNodes int `json:"filtered_nodes"`
NodeCount int `json:"node_count"`
NonpseudoNodeCount int `json:"nonpseudo_node_count"`
} `json:"stats"`
URL string `json:"url"`
} `json:"sub_topologies,omitempty"`
URL string `json:"url"`
}
type DeepfenceTopology struct {
AgentVersion string `json:"version,omitempty"`
AgentRunning string `json:"agent_running,omitempty"`
KernelVersion string `json:"kernel_version,omitempty"`
Uptime int `json:"uptime,omitempty"`
AuthToken string `json:"auth_token,omitempty"`
HostName string `json:"host_name,omitempty"`
Os string `json:"os,omitempty"`
LocalNetworks []string `json:"local_networks,omitempty"`
InterfaceNames []string `json:"interfaceNames,omitempty"`
Name string `json:"name"`
InterfaceIps map[string]string `json:"interface_ips,omitempty"`
CloudProvider string `json:"cloud_provider,omitempty"`
Adjacency []string `json:"adjacency,omitempty"`
DockerContainerCommand string `json:"docker_container_command,omitempty"`
DockerContainerStateHuman string `json:"docker_container_state_human,omitempty"`
DockerContainerUptime int `json:"docker_container_uptime,omitempty"`
DockerContainerNetworks string `json:"docker_container_networks,omitempty"`
DockerContainerIps []string `json:"docker_container_ips,omitempty"`
DockerContainerCreated string `json:"docker_container_created,omitempty"`
DockerContainerID string `json:"docker_container_id,omitempty"`
DockerContainerState string `json:"docker_container_state,omitempty"`
DockerContainerPorts string `json:"docker_container_ports,omitempty"`
ID string `json:"id"`
ContainerName string `json:"container_name,omitempty"`
Type string `json:"type"`
ImageName string `json:"image_name,omitempty"`
ImageNameWithTag string `json:"image_name_with_tag,omitempty"`
Pseudo bool `json:"pseudo"`
Meta string `json:"meta,omitempty"`
ImageTag string `json:"image_tag,omitempty"`
ContainerCount int `json:"container_count,omitempty"`
PodCount int `json:"pod_count,omitempty"`
PodName string `json:"pod_name,omitempty"`
Pid int `json:"pid,omitempty"`
Cmdline string `json:"cmdline,omitempty"`
OpenFiles string `json:"OpenFiles,omitempty"`
Ppid int `json:"ppid,omitempty"`
Threads int `json:"threads,omitempty"`
Process string `json:"process,omitempty"`
KubernetesState string `json:"kubernetes_state,omitempty"`
KubernetesIP string `json:"kubernetes_ip,omitempty"`
KubernetesPublicIP string `json:"kubernetes_public_ip,omitempty"`
KubernetesIngressIP string `json:"kubernetes_ingress_ip,omitempty"`
KubernetesNamespace string `json:"kubernetes_namespace,omitempty"`
KubernetesCreated string `json:"kubernetes_created,omitempty"`
KubernetesRestartCount int `json:"kubernetes_restart_count,omitempty"`
KubernetesIsInHostNetwork bool `json:"kubernetes_is_in_host_network,omitempty"`
KubernetesType string `json:"kubernetes_type,omitempty"`
KubernetesPorts string `json:"kubernetes_ports,omitempty"`
KubernetesNodeType string `json:"kubernetes_node_type,omitempty"`
KubernetesObservedGeneration int `json:"kubernetes_observed_generation,omitempty"`
KubernetesDesiredReplicas int `json:"kubernetes_desired_replicas,omitempty"`
KubernetesStrategy string `json:"kubernetes_strategy,omitempty"`
KubernetesSnapshotData string `json:"kubernetes_snapshot_data,omitempty"`
KubernetesVolumeClaim string `json:"kubernetes_volume_claim,omitempty"`
KubernetesVolumeCapacity string `json:"kubernetes_volume_capacity,omitempty"`
KubernetesVolumeName string `json:"kubernetes_volume_name,omitempty"`
KubernetesVolumeSnapshotName string `json:"kubernetes_volume_snapshot_name,omitempty"`
KubernetesProvisioner string `json:"kubernetes_provisioner,omitempty"`
KubernetesName string `json:"kubernetes_name,omitempty"`
KubernetesStorageClassName string `json:"kubernetes_storage_class_name,omitempty"`
KubernetesAccessModes string `json:"kubernetes_access_modes,omitempty"`
KubernetesStatus string `json:"kubernetes_status,omitempty"`
KubernetesStorageDriver string `json:"kubernetes_storage_driver,omitempty"`
Parents []ParentNode `json:"parents,omitempty"`
ConnectedProcesses *ConnectedProcesses `json:"connectedProcesses,omitempty"`
CloudMetadata *CloudMetadata `json:"cloud_metadata,omitempty"`
KubernetesClusterId string `json:"kubernetes_cluster_id,omitempty"`
KubernetesClusterName string `json:"kubernetes_cluster_name,omitempty"`
ClusterAgentProbeId string `json:"cluster_agent_probe_id"`
UserDefinedTags []string `json:"user_defined_tags"`
DockerImageSize string `json:"docker_image_size,omitempty"`
DockerImageCreatedAt string `json:"docker_image_created_at,omitempty"`
DockerImageVirtualSize string `json:"docker_image_virtual_size,omitempty"`
DockerImageID string `json:"docker_image_id,omitempty"`
IsUiVm bool `json:"is_ui_vm,omitempty"`
Metrics []Metric `json:"metrics,omitempty"`
SwarmStackNamespace string `json:"stack_namespace,omitempty"`
ScopeId string `json:"scope_id,omitempty"`
VulnerabilityScanStatus string `json:"vulnerability_scan_status,omitempty"`
VulnerabilityScanStatusTime string `json:"vulnerability_scan_status_time,omitempty"`
SecretScanStatus string `json:"secret_scan_status,omitempty"`
SecretScanStatusTime string `json:"secret_scan_status_time,omitempty"`
}
type TopologyFilterNumberOption struct {
Min int `json:"min"`
Max int `json:"max"`
}
type TopologyFilterOption struct {
Name string `json:"name"`
Label string `json:"label"`
Type string `json:"type"`
Options []string `json:"options"`
NumberOptions *TopologyFilterNumberOption `json:"number_options,omitempty"`
}
type ConnectedProcesses interface{}
func multiRemoveFromSlice(data *[]string, ids []string) {
m := make(map[string]bool, len(ids))
for _, id := range ids {
m[id] = true
}
s, x := *data, 0
for _, r := range s {
if !m[r] {
s[x] = r
x++
}
}
*data = s[0:x]
}
func multiRemoveFromScopeTopologySlice(data *[]ScopeTopology, ids []string) {
m := make(map[string]bool, len(ids))
for _, id := range ids {
m[id] = true
}
s, x := *data, 0
for _, r := range s {
if !m[r.ID] {
s[x] = r
x++
}
}
*data = s[0:x]
}
func multiRemoveFromDfTopologySlice(data *[]DeepfenceTopology, ids []string) {
m := make(map[string]bool, len(ids))
for _, id := range ids {
m[id] = true
}
s, x := *data, 0
for _, r := range s {
if !m[r.ID] {
s[x] = r
x++
}
}
*data = s[0:x]
}
func DeepCopyDfTopology(originalMap map[string]DeepfenceTopology) map[string]DeepfenceTopology {
newMap := map[string]DeepfenceTopology{}
for k, v := range originalMap {
newMap[k] = v
}
return newMap
}
func DeepCopyScopeTopology(originalMap map[string]ScopeTopology) map[string]ScopeTopology {
newMap := map[string]ScopeTopology{}
for k, v := range originalMap {
newMap[k] = v
}
return newMap
}
func (scopeTopologyDiff *ScopeTopologyDiff) deleteIdsFromScopeTopologyDiff(deleteNodeIds []string) {
multiRemoveFromSlice(&scopeTopologyDiff.Remove, deleteNodeIds)
multiRemoveFromScopeTopologySlice(&scopeTopologyDiff.Add, deleteNodeIds)
multiRemoveFromScopeTopologySlice(&scopeTopologyDiff.Update, deleteNodeIds)
}
func (dfTopologyDiff *DeepfenceTopologyDiff) deleteIdsFromDfTopologyDiff(deleteNodeIds []string) {
multiRemoveFromSlice(&dfTopologyDiff.Remove, deleteNodeIds)
multiRemoveFromDfTopologySlice(&dfTopologyDiff.Add, deleteNodeIds)
multiRemoveFromDfTopologySlice(&dfTopologyDiff.Update, deleteNodeIds)
}
type ScopeTopologyDiff struct {
Add []ScopeTopology `json:"add"`
Update []ScopeTopology `json:"update"`
Remove []string `json:"remove"`
Reset bool `json:"reset"`
Options TopologyOptions `json:"options"`
}
type DeepfenceTopologyDiff struct {
Add []DeepfenceTopology `json:"add"`
Update []DeepfenceTopology `json:"update"`
Remove []string `json:"remove"`
Reset bool `json:"reset"`
Options TopologyOptions `json:"options"`
}
|
[
"\"REDIS_HOST\"",
"\"REDIS_PORT\"",
"\"REDIS_DB_NUMBER\""
] |
[] |
[
"REDIS_PORT",
"REDIS_HOST",
"REDIS_DB_NUMBER"
] |
[]
|
["REDIS_PORT", "REDIS_HOST", "REDIS_DB_NUMBER"]
|
go
| 3 | 0 | |
infra/api/controller.go
|
package api
import (
"errors"
"fmt"
"net/http"
"os"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/go-playground/validator/v10"
domainService "github.com/joubertredrat-tests/unico-dev-test-2k21/domain/fair/service"
"github.com/joubertredrat-tests/unico-dev-test-2k21/infra/domain/fair/repository"
"github.com/joubertredrat-tests/unico-dev-test-2k21/infra/log"
"github.com/joubertredrat-tests/unico-dev-test-2k21/infra/mysql"
)
type Controller struct {
}
func NewController() Controller {
return Controller{}
}
func (c *Controller) handleHealth(ctx *gin.Context) {
response := struct {
Message string `json:"message"`
}{
Message: "Hi, you are you today?",
}
ctx.JSON(http.StatusOK, response)
}
func (c *Controller) handleListOpenMarket(ctx *gin.Context) {
log, err := log.NewLogFile(os.Getenv("APP_LOG_FILENAME"))
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
var request OpenMarketListSearchCriteria
ctx.ShouldBindQuery(&request)
db, err := mysql.NewMysqlConnection(
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_DBNAME"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
)
if err != nil {
log.Println(err)
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
searchCriteria := createOpenMarkeSearchCriteriatFromListRequest(request)
openMarketRepositoryMysql := repository.NewOpenMarketRepositoryMysql(db, log)
openMarketService := domainService.NewOpenMarketService(openMarketRepositoryMysql)
openMarketList, _ := openMarketService.GetListByCriteria(searchCriteria)
response := createResponseFromOpenMarketList(openMarketList)
ctx.JSON(http.StatusOK, response)
}
func (c *Controller) handleCreateOpenMarket(ctx *gin.Context) {
log, err := log.NewLogFile(os.Getenv("APP_LOG_FILENAME"))
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
var request OpenMarketCreateRequest
if err := ctx.ShouldBindBodyWith(&request, binding.JSON); err != nil {
log.Println(err)
ctx.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprint(err)})
return
}
if err := validator.New().Struct(request); err != nil {
errors := []string{}
for _, fieldErr := range err.(validator.ValidationErrors) {
errors = append(errors, fmt.Sprint(fieldErr))
}
log.Println(err)
ctx.JSON(http.StatusBadRequest, gin.H{"errors": errors})
return
}
openMarket := createOpenMarketFromCreateRequest(request)
db, err := mysql.NewMysqlConnection(
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_DBNAME"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
)
if err != nil {
log.Println(err)
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
openMarketRepositoryMysql := repository.NewOpenMarketRepositoryMysql(db, log)
openMarketService := domainService.NewOpenMarketService(openMarketRepositoryMysql)
openMarketCreated, err := openMarketService.Create(openMarket)
if err != nil {
log.Println(err)
if errors.Is(err, domainService.OpenMarketServiceAlreadyExistError) {
ctx.JSON(http.StatusUnprocessableEntity, gin.H{"error": fmt.Sprint(err)})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
response := createResponseFromOpenMarket(*openMarketCreated)
ctx.JSON(http.StatusCreated, response)
}
func (c *Controller) handleGetOpenMarket(ctx *gin.Context) {
log, err := log.NewLogFile(os.Getenv("APP_LOG_FILENAME"))
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
registryID := ctx.Param("id")
if registryID == "" {
log.Println(gin.H{"error": "registry id required"})
ctx.JSON(http.StatusBadRequest, gin.H{"error": "registry id required"})
return
}
db, err := mysql.NewMysqlConnection(
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_DBNAME"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
)
if err != nil {
log.Println(err)
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
openMarketRepositoryMysql := repository.NewOpenMarketRepositoryMysql(db, log)
openMarketService := domainService.NewOpenMarketService(openMarketRepositoryMysql)
openMarketFound, err := openMarketService.GetByRegistryID(registryID)
if err != nil {
log.Println(err)
if errors.Is(err, domainService.OpenMarketServiceNotFoundError) {
ctx.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprint(err)})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
response := createResponseFromOpenMarket(*openMarketFound)
ctx.JSON(http.StatusOK, response)
}
func (c *Controller) handleUpdateOpenMarket(ctx *gin.Context) {
log, err := log.NewLogFile(os.Getenv("APP_LOG_FILENAME"))
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
registryID := ctx.Param("id")
if registryID == "" {
log.Println(gin.H{"error": "registry id required"})
ctx.JSON(http.StatusBadRequest, gin.H{"error": "registry id required"})
return
}
var request OpenMarketUpdateRequest
if err := ctx.ShouldBindBodyWith(&request, binding.JSON); err != nil {
log.Println(err)
ctx.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprint(err)})
return
}
openMarket := createOpenMarketFromUpdateRequest(request)
openMarket.RegistryID = registryID
db, err := mysql.NewMysqlConnection(
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_DBNAME"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
)
if err != nil {
log.Println(err)
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
openMarketRepositoryMysql := repository.NewOpenMarketRepositoryMysql(db, log)
openMarketService := domainService.NewOpenMarketService(openMarketRepositoryMysql)
openMarketUpdated, err := openMarketService.Update(openMarket)
if err != nil {
log.Println(err)
if errors.Is(err, domainService.OpenMarketServiceNotFoundError) {
ctx.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprint(err)})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
response := createResponseFromOpenMarket(*openMarketUpdated)
ctx.JSON(http.StatusOK, response)
}
func (c *Controller) handleDeleteOpenMarket(ctx *gin.Context) {
log, err := log.NewLogFile(os.Getenv("APP_LOG_FILENAME"))
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
registryID := ctx.Param("id")
if registryID == "" {
log.Println(gin.H{"error": "registry id required"})
ctx.JSON(http.StatusBadRequest, gin.H{"error": "registry id required"})
return
}
db, err := mysql.NewMysqlConnection(
os.Getenv("DB_HOST"),
os.Getenv("DB_PORT"),
os.Getenv("DB_DBNAME"),
os.Getenv("DB_USER"),
os.Getenv("DB_PASSWORD"),
)
if err != nil {
log.Println(err)
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
openMarketRepositoryMysql := repository.NewOpenMarketRepositoryMysql(db, log)
openMarketService := domainService.NewOpenMarketService(openMarketRepositoryMysql)
if err := openMarketService.Delete(registryID); err != nil {
log.Println(err)
if errors.Is(err, domainService.OpenMarketServiceNotFoundError) {
ctx.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprint(err)})
return
}
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprint(err)})
return
}
ctx.JSON(http.StatusNoContent, struct{}{})
}
|
[
"\"APP_LOG_FILENAME\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_DBNAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"APP_LOG_FILENAME\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_DBNAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"APP_LOG_FILENAME\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_DBNAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"APP_LOG_FILENAME\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_DBNAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\"",
"\"APP_LOG_FILENAME\"",
"\"DB_HOST\"",
"\"DB_PORT\"",
"\"DB_DBNAME\"",
"\"DB_USER\"",
"\"DB_PASSWORD\""
] |
[] |
[
"APP_LOG_FILENAME",
"DB_DBNAME",
"DB_PASSWORD",
"DB_HOST",
"DB_PORT",
"DB_USER"
] |
[]
|
["APP_LOG_FILENAME", "DB_DBNAME", "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_USER"]
|
go
| 6 | 0 | |
pkg/crypto/sha1/go118_export.go
|
// export by github.com/goplus/gossa/cmd/qexp
//go:build go1.18
// +build go1.18
package sha1
import (
q "crypto/sha1"
"go/constant"
"reflect"
"github.com/goplus/gossa"
)
func init() {
gossa.RegisterPackage(&gossa.Package{
Name: "sha1",
Path: "crypto/sha1",
Deps: map[string]string{
"crypto": "crypto",
"encoding/binary": "binary",
"errors": "errors",
"hash": "hash",
"internal/cpu": "cpu",
"math/bits": "bits",
},
Interfaces: map[string]reflect.Type{},
NamedTypes: map[string]gossa.NamedType{},
AliasTypes: map[string]reflect.Type{},
Vars: map[string]reflect.Value{},
Funcs: map[string]reflect.Value{
"New": reflect.ValueOf(q.New),
"Sum": reflect.ValueOf(q.Sum),
},
TypedConsts: map[string]gossa.TypedConst{},
UntypedConsts: map[string]gossa.UntypedConst{
"BlockSize": {"untyped int", constant.MakeInt64(int64(q.BlockSize))},
"Size": {"untyped int", constant.MakeInt64(int64(q.Size))},
},
})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
setup.py
|
from setuptools import setup, Extension
from Cython.Build import cythonize
import warnings
import numpy as np
import os
import tempfile, subprocess, shutil
import versioneer
DISTNAME='pyqg'
URL='http://github.com/pyqg/pyqg'
AUTHOR='pyqg team'
AUTHOR_EMAIL='[email protected]'
LICENSE='MIT'
DESCRIPTION='python quasigeostrophic model'
LONG_DESCRIPTION="""
pyqg is a python solver for quasigeostrophic systems. Quasigeostrophic
equations are an approximation to the full fluid equations of motion in
the limit of strong rotation and stratification and are most applicable
to geophysical fluid dynamics problems.
Students and researchers in ocean and atmospheric dynamics are the intended
audience of pyqg. The model is simple enough to be used by students new to
the field yet powerful enough for research. We strive for clear documentation
and thorough testing.
pyqg supports a variety of different configurations using the same
computational kernel. The different configurations are evolving and are
described in detail in the documentation. The kernel, implemented in Cython,
uses a pseudo-spectral method which is heavily dependent on the fast Fourier
transform. For this reason, pyqg depends on pyfftw and the FFTW Fourier
Transform library. The kernel is multi-threaded but does not support mpi.
Optimal performance will be achieved on a single system with many cores.
Links
-----
- HTML documentation: http://pyqg.readthedocs.org
- Issue tracker: http://github.com/pyqg/pyqg/issues
- Source code: http://github.com/pyqg/pyqg
"""
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Atmospheric Science'
]
### Dependency section ###
install_requires = [
'cython',
'numpy'
]
# This hack tells cython whether pyfftw is present
use_pyfftw_file = 'pyqg/.compile_time_use_pyfftw.pxi'
with open(use_pyfftw_file, 'wb') as f:
try:
import pyfftw
f.write(b'DEF PYQG_USE_PYFFTW = 1')
except ImportError:
f.write(b'DEF PYQG_USE_PYFFTW = 0')
warnings.warn('Could not import pyfftw. Model may be slower.')
# check for openmp following
# http://stackoverflow.com/questions/16549893/programatically-testing-for-openmp-support-from-a-python-setup-script
# see http://openmp.org/wp/openmp-compilers/
omp_test = \
br"""
#include <omp.h>
#include <stdio.h>
int main() {
#pragma omp parallel
printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
}
"""
# python 3 needs rb
def check_for_openmp():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
filename = r'test.c'
try:
cc = os.environ['CC']
except KeyError:
cc = 'gcc'
with open(filename, 'wb', 0) as file:
file.write(omp_test)
with open(os.devnull, 'wb') as fnull:
try:
result = subprocess.call([cc, '-fopenmp', filename],
stdout=fnull, stderr=fnull)
except FileNotFoundError:
result = 1
print('check_for_openmp() result: ', result)
os.chdir(curdir)
#clean up
shutil.rmtree(tmpdir)
return result==0
extra_compile_args = []
extra_link_args = []
use_openmp = True
if check_for_openmp() and use_openmp:
extra_compile_args.append('-fopenmp')
extra_link_args.append('-fopenmp')
else:
warnings.warn('Could not link with openmp. Model will be slow.')
# readthedocs can't and shouldn't build pyfftw
# apparently setup.py overrides docs/requirements.txt
#on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#if on_rtd:
# install_requires.remove('pyfftw')
tests_require = ['pytest']
def readme():
with open('README.md') as f:
return f.read()
ext_module = Extension(
"pyqg.kernel",
["pyqg/kernel.pyx"],
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description=DESCRIPTION,
classifiers=CLASSIFIERS,
long_description=LONG_DESCRIPTION,
url=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
packages=['pyqg'],
install_requires=install_requires,
ext_modules = cythonize(ext_module),
include_dirs = [np.get_include()],
tests_require = tests_require,
test_suite = 'nose.collector',
zip_safe=False)
|
[] |
[] |
[
"CC",
"READTHEDOCS"
] |
[]
|
["CC", "READTHEDOCS"]
|
python
| 2 | 0 | |
projDir/uw/scripts/ftpCSapr2Images.py
|
#!/usr/bin/python
import sys
import os
import time
import datetime
from datetime import timedelta
import requests
from bs4 import BeautifulSoup
from ftplib import FTP
#if len(sys.argv) != 2:
# print >>sys.stderr, "Useage: ",sys.argv[0]," [YYYY_MM_DD]"
# quit()
#date = sys.argv[1]
# get current date and time minus one hour
UTC_OFFSET_TIMEDELTA = datetime.datetime.utcnow() - datetime.datetime.now()
date_1_hour_ago = datetime.datetime.now() - timedelta(hours=1) + UTC_OFFSET_TIMEDELTA
date = date_1_hour_ago.strftime("%Y_%m_%d")
dateNoHyphens = date_1_hour_ago.strftime("%Y%m%d")
hour = date_1_hour_ago.strftime("%H")
#nowTime = time.gmtime()
#now = datetime.datetime(nowTime.tm_year, nowTime.tm_mon, nowTime.tm_mday,
# nowTime.tm_hour, nowTime.tm_min, nowTime.tm_sec)
#date = now.strftime("%Y_%m_%d")
#date = '2018_11_01'
url = 'https://engineering.arm.gov/~radar/amf1_csapr2_incoming_images/hsrhi/'+date+'/'
ext = 'png'
homeDir = os.getenv('HOME')
outDir = os.path.join(homeDir, 'radar/csapr2/' + date)
category = 'radar'
platform = 'DOE_CSapr2'
ftpCatalogServer = 'catalog.eol.ucar.edu'
ftpCatalogUser = 'anonymous'
catalogDestDir = '/pub/incoming/catalog/relampago'
debug = 1
def listFD(url, ext=''):
page = requests.get(url).text
print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
if not os.path.exists(outDir):
os.makedirs(outDir)
os.chdir(outDir)
for file in listFD(url, ext):
tmp = os.path.basename(file)
(f,e) = os.path.splitext(tmp)
parts = f.split('_')
(fdate,ftime) = parts[3].split('-')
fhour = ftime[0:2]
if fdate == dateNoHyphens and fhour == hour:
print file
cmd = 'wget '+file
os.system(cmd)
# correct names of -0.0 files
#cmd = 'mmv "*_-0.0.png" "#1_00.0.png"'
#os.system(cmd)
# rename files and ftp them
for file in os.listdir(outDir):
if file.startswith('cor_'):
if debug:
print >>sys.stderr, "file = ",file
(filename, file_ext) = os.path.splitext(file)
parts = filename.split('_')
(date,time) = parts[3].split('-')
angle_parts = parts[5].split('.')
if len(angle_parts[0]) == 1:
angle = '00'+angle_parts[0]
elif len(angle_parts[0]) == 2:
angle = '0'+angle_parts[0]
else:
angle = angle_parts[0]
product = parts[2]+'_'+parts[4]+'_'+angle
file_cat = category+'.'+platform+'.'+date+time+'.'+product+file_ext
if debug:
print >>sys.stderr, "file_cat = ",file_cat
cmd = 'mv '+file+' '+file_cat
os.system(cmd)
# ftp file
try:
catalogFTP = FTP(ftpCatalogServer,ftpCatalogUser)
catalogFTP.cwd(catalogDestDir)
file = open(file_cat,'rb')
catalogFTP.storbinary('STOR '+file_cat,file)
file.close()
catalogFTP.quit()
except Exception as e:
print >>sys.stderr, "FTP failed, exception: ", e
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"database/sql"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"syscall"
_ "github.com/go-sql-driver/mysql"
"github.com/braintree/manners"
"github.com/vynjo/circhashi/handlers"
"github.com/vynjo/circhashi/health"
"github.com/vynjo/circhashi/user"
)
const version = "1.0.0"
func main() {
log.Println("Starting CircHashi...")
vaultToken := os.Getenv("VAULT_TOKEN")
if vaultToken == "" {
log.Fatal("VAULT_TOKEN must be set and non-empty")
}
vaultAddr := os.Getenv("VAULT_ADDR")
if vaultAddr == "" {
log.Fatal("VAULT_ADDR must be set and non-empty")
}
vc, err := newVaultClient(vaultAddr, vaultToken)
if err != nil {
log.Fatal(err)
}
log.Println("Getting JWT shared secret...")
secret, err := vc.getJWTSecret("secret/circhashi")
if err != nil {
log.Fatal(err)
}
log.Println("Getting database credentials...")
username, password, err := vc.getDatabaseCredentials("mysql/creds/circhashi")
if err != nil {
log.Fatal(err)
}
log.Println("Initializing database connection pool...")
dbAddr := os.Getenv("CIRCHASHI_DB_HOST")
dsn := fmt.Sprintf("%s:%s@tcp(%s)/circhashi", username, password, dbAddr)
db, err := sql.Open("mysql", dsn)
if err != nil {
log.Fatal(err)
}
if err := db.Ping(); err != nil {
log.Fatal(err)
}
httpAddr := os.Getenv("NOMAD_ADDR_http")
if httpAddr == "" {
log.Fatal("NOMAD_ADDR_http must be set and non-empty")
}
log.Printf("HTTP service listening on %s", httpAddr)
mux := http.NewServeMux()
mux.HandleFunc("/", handlers.HelloHandler)
mux.Handle("/login", handlers.LoginHandler(secret, user.DB))
mux.Handle("/secure", handlers.JWTAuthHandler(handlers.HelloHandler))
mux.Handle("/version", handlers.VersionHandler(version))
mux.HandleFunc("/healthz", health.HealthzHandler)
mux.HandleFunc("/healthz/status", health.HealthzStatusHandler)
httpServer := manners.NewServer()
httpServer.Addr = httpAddr
httpServer.Handler = handlers.LoggingHandler(mux)
errChan := make(chan error, 10)
go func() {
errChan <- httpServer.ListenAndServe()
}()
go func() {
errChan <- vc.renewDatabaseCredentials()
}()
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
for {
select {
case err := <-errChan:
if err != nil {
log.Fatal(err)
}
case s := <-signalChan:
log.Println(fmt.Sprintf("Captured %v. Exiting...", s))
httpServer.BlockingClose()
os.Exit(0)
}
}
}
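// Illustrative note (not part of the original file): the checks above mean the binary
// refuses to start unless VAULT_TOKEN, VAULT_ADDR and NOMAD_ADDR_http are set, and it
// dials MySQL at CIRCHASHI_DB_HOST. A local run might therefore look like
//
//	VAULT_ADDR=http://127.0.0.1:8200 VAULT_TOKEN=<token> \
//	CIRCHASHI_DB_HOST=127.0.0.1:3306 NOMAD_ADDR_http=127.0.0.1:8080 ./circhashi
//
// where the addresses are placeholders, not values taken from this repository.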
|
[
"\"VAULT_TOKEN\"",
"\"VAULT_ADDR\"",
"\"CIRCHASHI_DB_HOST\"",
"\"NOMAD_ADDR_http\""
] |
[] |
[
"VAULT_ADDR",
"NOMAD_ADDR_http",
"VAULT_TOKEN",
"CIRCHASHI_DB_HOST"
] |
[]
|
["VAULT_ADDR", "NOMAD_ADDR_http", "VAULT_TOKEN", "CIRCHASHI_DB_HOST"]
|
go
| 4 | 0 | |
platform/Coursera/Algorithms-Specialization/1-Divide-and-Conquer-Sorting-and-Searching-and-Randomized-Algorithms/3-QuickSort/quicksort.go
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
type pivotID int
const (
firstPivot pivotID = iota
lastPivot
medianOfThreePivot
)
func countComparisons(arr []int, id pivotID) int64 {
return quickSort(arr, 0, len(arr)-1, id)
}
func quickSort(arr []int, left int, right int, id pivotID) int64 {
if left >= right {
return 0
}
choosePivot(arr, left, right, id)
p := partition(arr, left, right)
count := int64(right - left)
count += quickSort(arr, left, p-1, id)
count += quickSort(arr, p+1, right, id)
return count
}
func choosePivot(arr []int, left int, right int, id pivotID) {
switch id {
case firstPivot:
case lastPivot:
arr[left], arr[right] = arr[right], arr[left]
case medianOfThreePivot:
pivotIndex := max(arr, min(arr, left, right), min(arr, max(arr, left, right), (left+right)/2))
arr[left], arr[pivotIndex] = arr[pivotIndex], arr[left]
}
}
func partition(arr []int, left int, right int) int {
pivot := arr[left]
i := left
for j := left + 1; j <= right; j++ {
if arr[j] < pivot {
i++
arr[i], arr[j] = arr[j], arr[i]
}
}
arr[i], arr[left] = arr[left], arr[i]
return i
}
func max(arr []int, left int, right int) int {
if arr[left] > arr[right] {
return left
}
return right
}
func min(arr []int, left int, right int) int {
if arr[left] < arr[right] {
return left
}
return right
}
func main() {
stdin, err := os.Open(os.Getenv("INPUT_PATH"))
if err != nil {
stdin = os.Stdin
}
defer stdin.Close()
stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
if err != nil {
stdout = os.Stdout
}
defer stdout.Close()
reader := bufio.NewScanner(stdin)
writer := bufio.NewWriterSize(stdout, 1024*1024)
arr := make([]int, 0, 64)
for reader.Scan() {
value, err := strconv.Atoi(reader.Text())
checkError(err)
arr = append(arr, value)
}
pivots := []pivotID{firstPivot, lastPivot, medianOfThreePivot}
for _, pivot := range pivots {
array := make([]int, len(arr))
copy(array, arr)
result := countComparisons(array, pivot)
fmt.Fprintln(writer, result)
}
writer.Flush()
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
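// Example (illustrative, not part of the original solution): countComparisons mutates
// the slice it is given, so comparing the three pivot rules on the same input requires
// a fresh copy per call, exactly as main does above:
//
//	data := []int{3, 8, 2, 5, 1, 4, 7, 6}
//	for _, id := range []pivotID{firstPivot, lastPivot, medianOfThreePivot} {
//		buf := make([]int, len(data))
//		copy(buf, data)
//		fmt.Println(countComparisons(buf, id))
//	}
//
// Each printed value is the comparison count for that pivot strategy on this input.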
|
[
"\"INPUT_PATH\"",
"\"OUTPUT_PATH\""
] |
[] |
[
"INPUT_PATH",
"OUTPUT_PATH"
] |
[]
|
["INPUT_PATH", "OUTPUT_PATH"]
|
go
| 2 | 0 | |
src/e2e_test/protocol_loadtest/server.go
|
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package main
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"os"
"time"
"px.dev/pixie/src/e2e_test/protocol_loadtest/grpc"
"px.dev/pixie/src/e2e_test/protocol_loadtest/http"
)
const (
bitsize = 2048
)
var x509Name = pkix.Name{
Organization: []string{"Pixie Labs Inc."},
Country: []string{"US"},
Province: []string{"California"},
Locality: []string{"San Francisco"},
}
func generateCertFilesOrDie(dnsNames []string) *tls.Config {
ca := &x509.Certificate{
SerialNumber: big.NewInt(1653),
Subject: x509Name,
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0),
IsCA: true,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
}
caKey, err := rsa.GenerateKey(rand.Reader, bitsize)
if err != nil {
panic(fmt.Errorf("Error generating CA: %v", err))
}
cert := &x509.Certificate{
SerialNumber: big.NewInt(1658),
Subject: x509Name,
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0),
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
BasicConstraintsValid: true,
DNSNames: dnsNames,
}
privateKey, err := rsa.GenerateKey(rand.Reader, bitsize)
if err != nil {
panic(fmt.Errorf("Error generating private key: %v", err))
}
certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &privateKey.PublicKey, caKey)
if err != nil {
panic(fmt.Errorf("Error creating certificate: %v", err))
}
certData := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes})
if err != nil {
panic(fmt.Errorf("Error encoding cert data: %v", err))
}
keyData := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
if err != nil {
panic(fmt.Errorf("Error encoding key data: %v", err))
}
pair, err := tls.X509KeyPair(certData, keyData)
if err != nil {
panic(fmt.Errorf("Error loading keypair: %v", err))
}
certPool := x509.NewCertPool()
certPool.AddCert(ca)
return &tls.Config{
Certificates: []tls.Certificate{pair},
ClientAuth: tls.NoClientCert,
RootCAs: certPool,
}
}
func main() {
	var tlsConfig, grpcTLSConfig *tls.Config
httpPort := os.Getenv("HTTP_PORT")
httpSSLPort := os.Getenv("HTTP_SSL_PORT")
grpcPort := os.Getenv("GRPC_PORT")
grpcSSLPort := os.Getenv("GRPC_SSL_PORT")
if httpSSLPort != "" || grpcSSLPort != "" {
tlsConfig = generateCertFilesOrDie([]string{"localhost"})
		grpcTLSConfig = tlsConfig.Clone()
grpcTLSConfig.NextProtos = []string{"h2"}
}
go http.RunHTTPServers(tlsConfig, httpPort, httpSSLPort)
	grpc.RunGRPCServers(grpcTLSConfig, grpcPort, grpcSSLPort)
}
|
[
"\"HTTP_PORT\"",
"\"HTTP_SSL_PORT\"",
"\"GRPC_PORT\"",
"\"GRPC_SSL_PORT\""
] |
[] |
[
"GRPC_SSL_PORT",
"HTTP_SSL_PORT",
"GRPC_PORT",
"HTTP_PORT"
] |
[]
|
["GRPC_SSL_PORT", "HTTP_SSL_PORT", "GRPC_PORT", "HTTP_PORT"]
|
go
| 4 | 0 | |
superset/views/base.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import functools
import logging
import traceback
from datetime import datetime
from typing import Any, Callable, cast, Dict, List, Optional, Union
import simplejson as json
import yaml
from flask import (
abort,
flash,
g,
get_flashed_messages,
redirect,
request,
Response,
send_file,
session,
)
from flask_appbuilder import BaseView, Model, ModelView
from flask_appbuilder.actions import action
from flask_appbuilder.forms import DynamicForm
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_appbuilder.security.sqla.models import User
from flask_appbuilder.widgets import ListWidget
from flask_babel import get_locale, gettext as __, lazy_gettext as _
from flask_jwt_extended.exceptions import NoAuthorizationError
from flask_wtf.csrf import CSRFError
from flask_wtf.form import FlaskForm
from pkg_resources import resource_filename
from sqlalchemy import exc, or_
from sqlalchemy.orm import Query
from werkzeug.exceptions import HTTPException
from wtforms import Form
from wtforms.fields.core import Field, UnboundField
from superset import (
app as superset_app,
appbuilder,
conf,
db,
get_feature_flags,
security_manager,
)
from superset.commands.exceptions import CommandException, CommandInvalidError
from superset.connectors.sqla import models
from superset.datasets.commands.exceptions import get_dataset_exist_error_msg
from superset.db_engine_specs import get_available_engine_specs
from superset.db_engine_specs.gsheets import GSheetsEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
SupersetErrorException,
SupersetErrorsException,
SupersetException,
SupersetSecurityException,
)
from superset.models.helpers import ImportExportMixin
from superset.models.reports import ReportRecipientType
from superset.superset_typing import FlaskResponse
from superset.translations.utils import get_language_pack
from superset.utils import core as utils
from .utils import bootstrap_user_data
FRONTEND_CONF_KEYS = (
"SUPERSET_WEBSERVER_TIMEOUT",
"SUPERSET_DASHBOARD_POSITION_DATA_LIMIT",
"SUPERSET_DASHBOARD_PERIODICAL_REFRESH_LIMIT",
"SUPERSET_DASHBOARD_PERIODICAL_REFRESH_WARNING_MESSAGE",
"DISABLE_DATASET_SOURCE_EDIT",
"ENABLE_JAVASCRIPT_CONTROLS",
"DEFAULT_SQLLAB_LIMIT",
"DEFAULT_VIZ_TYPE",
"SQL_MAX_ROW",
"SUPERSET_WEBSERVER_DOMAINS",
"SQLLAB_SAVE_WARNING_MESSAGE",
"DISPLAY_MAX_ROW",
"GLOBAL_ASYNC_QUERIES_TRANSPORT",
"GLOBAL_ASYNC_QUERIES_POLLING_DELAY",
"SQL_VALIDATORS_BY_ENGINE",
"SQLALCHEMY_DOCS_URL",
"SQLALCHEMY_DISPLAY_TEXT",
"GLOBAL_ASYNC_QUERIES_WEBSOCKET_URL",
"DASHBOARD_AUTO_REFRESH_MODE",
"SCHEDULED_QUERIES",
"EXCEL_EXTENSIONS",
"CSV_EXTENSIONS",
"COLUMNAR_EXTENSIONS",
"ALLOWED_EXTENSIONS",
)
logger = logging.getLogger(__name__)
config = superset_app.config
def get_error_msg() -> str:
if conf.get("SHOW_STACKTRACE"):
error_msg = traceback.format_exc()
else:
error_msg = "FATAL ERROR \n"
error_msg += (
"Stacktrace is hidden. Change the SHOW_STACKTRACE "
"configuration setting to enable it"
)
return error_msg
def json_error_response(
msg: Optional[str] = None,
status: int = 500,
payload: Optional[Dict[str, Any]] = None,
link: Optional[str] = None,
) -> FlaskResponse:
if not payload:
payload = {"error": "{}".format(msg)}
if link:
payload["link"] = link
return Response(
json.dumps(payload, default=utils.json_iso_dttm_ser, ignore_nan=True),
status=status,
mimetype="application/json",
)
def json_errors_response(
errors: List[SupersetError],
status: int = 500,
payload: Optional[Dict[str, Any]] = None,
) -> FlaskResponse:
if not payload:
payload = {}
payload["errors"] = [dataclasses.asdict(error) for error in errors]
return Response(
json.dumps(payload, default=utils.json_iso_dttm_ser, ignore_nan=True),
status=status,
mimetype="application/json; charset=utf-8",
)
def json_success(json_msg: str, status: int = 200) -> FlaskResponse:
return Response(json_msg, status=status, mimetype="application/json")
def data_payload_response(payload_json: str, has_error: bool = False) -> FlaskResponse:
status = 400 if has_error else 200
return json_success(payload_json, status=status)
def generate_download_headers(
extension: str, filename: Optional[str] = None
) -> Dict[str, Any]:
filename = filename if filename else datetime.now().strftime("%Y%m%d_%H%M%S")
content_disp = f"attachment; filename={filename}.{extension}"
headers = {"Content-Disposition": content_disp}
return headers
def api(f: Callable[..., FlaskResponse]) -> Callable[..., FlaskResponse]:
"""
A decorator to label an endpoint as an API. Catches uncaught exceptions and
return the response in the JSON format
"""
def wraps(self: "BaseSupersetView", *args: Any, **kwargs: Any) -> FlaskResponse:
try:
return f(self, *args, **kwargs)
except NoAuthorizationError as ex:
logger.warning(ex)
return json_error_response(get_error_msg(), status=401)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
return json_error_response(get_error_msg())
return functools.update_wrapper(wraps, f)
def handle_api_exception(
f: Callable[..., FlaskResponse]
) -> Callable[..., FlaskResponse]:
"""
A decorator to catch superset exceptions. Use it after the @api decorator above
so superset exception handler is triggered before the handler for generic
exceptions.
"""
def wraps(self: "BaseSupersetView", *args: Any, **kwargs: Any) -> FlaskResponse:
try:
return f(self, *args, **kwargs)
except SupersetSecurityException as ex:
logger.warning(ex)
return json_errors_response(
errors=[ex.error], status=ex.status, payload=ex.payload
)
except SupersetErrorsException as ex:
logger.warning(ex, exc_info=True)
return json_errors_response(errors=ex.errors, status=ex.status)
except SupersetErrorException as ex:
logger.warning(ex)
return json_errors_response(errors=[ex.error], status=ex.status)
except SupersetException as ex:
if ex.status >= 500:
logger.exception(ex)
return json_error_response(
utils.error_msg_from_exception(ex), status=ex.status
)
except HTTPException as ex:
logger.exception(ex)
return json_error_response(
utils.error_msg_from_exception(ex), status=cast(int, ex.code)
)
except (exc.IntegrityError, exc.DatabaseError, exc.DataError) as ex:
logger.exception(ex)
return json_error_response(utils.error_msg_from_exception(ex), status=422)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
return json_error_response(utils.error_msg_from_exception(ex))
return functools.update_wrapper(wraps, f)
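# Illustrative usage (not part of the original module): both decorators are meant to wrap
# view methods, with @api outermost so its generic JSON error handler only fires when
# handle_api_exception did not already translate a Superset exception, e.g.
#
#     class ExampleView(BaseSupersetView):
#         @api
#         @handle_api_exception
#         def example(self) -> FlaskResponse:
#             return self.json_response({"ok": True})
#
# The view class and endpoint name here are placeholders.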
def validate_sqlatable(table: models.SqlaTable) -> None:
"""Checks the table existence in the database."""
with db.session.no_autoflush:
table_query = db.session.query(models.SqlaTable).filter(
models.SqlaTable.table_name == table.table_name,
models.SqlaTable.schema == table.schema,
models.SqlaTable.database_id == table.database.id,
)
if db.session.query(table_query.exists()).scalar():
raise Exception(get_dataset_exist_error_msg(table.full_name))
# Fail before adding if the table can't be found
try:
table.get_sqla_table_object()
except Exception as ex:
logger.exception("Got an error in pre_add for %s", table.name)
raise Exception(
_(
"Table [%{table}s] could not be found, "
"please double check your "
"database connection, schema, and "
"table name, error: {}"
).format(table.name, str(ex))
) from ex
def create_table_permissions(table: models.SqlaTable) -> None:
security_manager.add_permission_view_menu("datasource_access", table.get_perm())
if table.schema:
security_manager.add_permission_view_menu("schema_access", table.schema_perm)
def is_user_admin() -> bool:
user_roles = [role.name.lower() for role in list(security_manager.get_user_roles())]
return "admin" in user_roles
class BaseSupersetView(BaseView):
@staticmethod
def json_response(obj: Any, status: int = 200) -> FlaskResponse:
return Response(
json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True),
status=status,
mimetype="application/json",
)
def render_app_template(self) -> FlaskResponse:
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/spa.html",
entry="spa",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
def menu_data() -> Dict[str, Any]:
menu = appbuilder.menu.get_data()
languages = {}
for lang in appbuilder.languages:
languages[lang] = {
**appbuilder.languages[lang],
"url": appbuilder.get_url_for_locale(lang),
}
brand_text = appbuilder.app.config["LOGO_RIGHT_TEXT"]
if callable(brand_text):
brand_text = brand_text()
build_number = appbuilder.app.config["BUILD_NUMBER"]
return {
"menu": menu,
"brand": {
"path": appbuilder.app.config["LOGO_TARGET_PATH"] or "/",
"icon": appbuilder.app_icon,
"alt": appbuilder.app_name,
"tooltip": appbuilder.app.config["LOGO_TOOLTIP"],
"text": brand_text,
},
"navbar_right": {
            # show the watermark if the default app icon has been overridden
"show_watermark": ("superset-logo-horiz" not in appbuilder.app_icon),
"bug_report_url": appbuilder.app.config["BUG_REPORT_URL"],
"documentation_url": appbuilder.app.config["DOCUMENTATION_URL"],
"version_string": appbuilder.app.config["VERSION_STRING"],
"version_sha": appbuilder.app.config["VERSION_SHA"],
"build_number": build_number,
"languages": languages,
"show_language_picker": len(languages.keys()) > 1,
"user_is_anonymous": g.user.is_anonymous,
"user_info_url": None
if appbuilder.app.config["MENU_HIDE_USER_INFO"]
else appbuilder.get_url_for_userinfo,
"user_logout_url": appbuilder.get_url_for_logout,
"user_login_url": appbuilder.get_url_for_login,
"user_profile_url": None
if g.user.is_anonymous or appbuilder.app.config["MENU_HIDE_USER_INFO"]
else f"/superset/profile/{g.user.username}",
"locale": session.get("locale", "en"),
},
}
def common_bootstrap_payload() -> Dict[str, Any]:
"""Common data always sent to the client"""
messages = get_flashed_messages(with_categories=True)
locale = str(get_locale())
# should not expose API TOKEN to frontend
frontend_config = {
k: (list(conf.get(k)) if isinstance(conf.get(k), set) else conf.get(k))
for k in FRONTEND_CONF_KEYS
}
if conf.get("SLACK_API_TOKEN"):
frontend_config["ALERT_REPORTS_NOTIFICATION_METHODS"] = [
ReportRecipientType.EMAIL,
ReportRecipientType.SLACK,
]
else:
frontend_config["ALERT_REPORTS_NOTIFICATION_METHODS"] = [
ReportRecipientType.EMAIL,
]
# verify client has google sheets installed
available_specs = get_available_engine_specs()
frontend_config["HAS_GSHEETS_INSTALLED"] = bool(available_specs[GSheetsEngineSpec])
bootstrap_data = {
"flash_messages": messages,
"conf": frontend_config,
"locale": locale,
"language_pack": get_language_pack(locale),
"feature_flags": get_feature_flags(),
"extra_sequential_color_schemes": conf["EXTRA_SEQUENTIAL_COLOR_SCHEMES"],
"extra_categorical_color_schemes": conf["EXTRA_CATEGORICAL_COLOR_SCHEMES"],
"theme_overrides": conf["THEME_OVERRIDES"],
"menu_data": menu_data(),
}
bootstrap_data.update(conf["COMMON_BOOTSTRAP_OVERRIDES_FUNC"](bootstrap_data))
return bootstrap_data
def get_error_level_from_status_code( # pylint: disable=invalid-name
status: int,
) -> ErrorLevel:
if status < 400:
return ErrorLevel.INFO
if status < 500:
return ErrorLevel.WARNING
return ErrorLevel.ERROR
# SIP-40 compatible error responses; make sure APIs raise
# SupersetErrorException or SupersetErrorsException
@superset_app.errorhandler(SupersetErrorException)
def show_superset_error(ex: SupersetErrorException) -> FlaskResponse:
logger.warning(ex)
return json_errors_response(errors=[ex.error], status=ex.status)
@superset_app.errorhandler(SupersetErrorsException)
def show_superset_errors(ex: SupersetErrorsException) -> FlaskResponse:
logger.warning(ex)
return json_errors_response(errors=ex.errors, status=ex.status)
# Redirect to login if the CSRF token is expired
@superset_app.errorhandler(CSRFError)
def refresh_csrf_token(ex: CSRFError) -> FlaskResponse:
logger.warning(ex)
if request.is_json:
return show_http_exception(ex)
return redirect(appbuilder.get_url_for_login)
@superset_app.errorhandler(HTTPException)
def show_http_exception(ex: HTTPException) -> FlaskResponse:
logger.warning(ex)
if (
"text/html" in request.accept_mimetypes
and not config["DEBUG"]
and ex.code in {404, 500}
):
path = resource_filename("superset", f"static/assets/{ex.code}.html")
return send_file(path, cache_timeout=0), ex.code
return json_errors_response(
errors=[
SupersetError(
message=utils.error_msg_from_exception(ex),
error_type=SupersetErrorType.GENERIC_BACKEND_ERROR,
level=ErrorLevel.ERROR,
),
],
status=ex.code or 500,
)
# Temporary handler for CommandException; if an API raises a
# CommandException it should be fixed to map it to SupersetErrorException
# or SupersetErrorsException, with a specific status code and error type
@superset_app.errorhandler(CommandException)
def show_command_errors(ex: CommandException) -> FlaskResponse:
logger.warning(ex)
if "text/html" in request.accept_mimetypes and not config["DEBUG"]:
path = resource_filename("superset", "static/assets/500.html")
return send_file(path, cache_timeout=0), 500
extra = ex.normalized_messages() if isinstance(ex, CommandInvalidError) else {}
return json_errors_response(
errors=[
SupersetError(
message=ex.message,
error_type=SupersetErrorType.GENERIC_COMMAND_ERROR,
level=get_error_level_from_status_code(ex.status),
extra=extra,
),
],
status=ex.status,
)
# Catch-all, to ensure all errors from the backend conform to SIP-40
@superset_app.errorhandler(Exception)
def show_unexpected_exception(ex: Exception) -> FlaskResponse:
logger.exception(ex)
if "text/html" in request.accept_mimetypes and not config["DEBUG"]:
path = resource_filename("superset", "static/assets/500.html")
return send_file(path, cache_timeout=0), 500
return json_errors_response(
errors=[
SupersetError(
message=utils.error_msg_from_exception(ex),
error_type=SupersetErrorType.GENERIC_BACKEND_ERROR,
level=ErrorLevel.ERROR,
),
],
)
@superset_app.context_processor
def get_common_bootstrap_data() -> Dict[str, Any]:
def serialize_bootstrap_data() -> str:
return json.dumps(
{"common": common_bootstrap_payload()},
default=utils.pessimistic_json_iso_dttm_ser,
)
return {"bootstrap_data": serialize_bootstrap_data}
class SupersetListWidget(ListWidget): # pylint: disable=too-few-public-methods
template = "superset/fab_overrides/list.html"
class SupersetModelView(ModelView):
page_size = 100
list_widget = SupersetListWidget
def render_app_template(self) -> FlaskResponse:
payload = {
"user": bootstrap_user_data(g.user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/spa.html",
entry="spa",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
class ListWidgetWithCheckboxes(ListWidget): # pylint: disable=too-few-public-methods
"""An alternative to list view that renders Boolean fields as checkboxes
Works in conjunction with the `checkbox` view."""
template = "superset/fab_overrides/list_with_checkboxes.html"
def validate_json(form: Form, field: Field) -> None: # pylint: disable=unused-argument
try:
json.loads(field.data)
except Exception as ex:
logger.exception(ex)
raise Exception(_("json isn't valid")) from ex
class YamlExportMixin: # pylint: disable=too-few-public-methods
"""
Override this if you want a dict response instead, with a certain key.
Used on DatabaseView for cli compatibility
"""
yaml_dict_key: Optional[str] = None
@action("yaml_export", __("Export to YAML"), __("Export to YAML?"), "fa-download")
def yaml_export(
self, items: Union[ImportExportMixin, List[ImportExportMixin]]
) -> FlaskResponse:
if not isinstance(items, list):
items = [items]
data = [t.export_to_dict() for t in items]
return Response(
yaml.safe_dump({self.yaml_dict_key: data} if self.yaml_dict_key else data),
headers=generate_download_headers("yaml"),
mimetype="application/text",
)
class DeleteMixin: # pylint: disable=too-few-public-methods
def _delete(self: BaseView, primary_key: int) -> None:
"""
        Delete function logic, override to implement different logic
deletes the record with primary_key = primary_key
:param primary_key:
record primary key to delete
"""
item = self.datamodel.get(primary_key, self._base_filters)
if not item:
abort(404)
try:
self.pre_delete(item)
except Exception as ex: # pylint: disable=broad-except
flash(str(ex), "danger")
else:
view_menu = security_manager.find_view_menu(item.get_perm())
pvs = (
security_manager.get_session.query(
security_manager.permissionview_model
)
.filter_by(view_menu=view_menu)
.all()
)
if self.datamodel.delete(item):
self.post_delete(item)
for pv in pvs:
security_manager.get_session.delete(pv)
if view_menu:
security_manager.get_session.delete(view_menu)
security_manager.get_session.commit()
flash(*self.datamodel.message)
self.update_redirect()
@action(
"muldelete", __("Delete"), __("Delete all Really?"), "fa-trash", single=False
)
def muldelete(self: BaseView, items: List[Model]) -> FlaskResponse:
if not items:
abort(404)
for item in items:
try:
self.pre_delete(item)
except Exception as ex: # pylint: disable=broad-except
flash(str(ex), "danger")
else:
self._delete(item.id)
self.update_redirect()
return redirect(self.get_redirect())
class DatasourceFilter(BaseFilter): # pylint: disable=too-few-public-methods
def apply(self, query: Query, value: Any) -> Query:
if security_manager.can_access_all_datasources():
return query
datasource_perms = security_manager.user_view_menu_names("datasource_access")
schema_perms = security_manager.user_view_menu_names("schema_access")
return query.filter(
or_(
self.model.perm.in_(datasource_perms),
self.model.schema_perm.in_(schema_perms),
)
)
class CsvResponse(Response):
"""
Override Response to take into account csv encoding from config.py
"""
charset = conf["CSV_EXPORT"].get("encoding", "utf-8")
default_mimetype = "text/csv"
def check_ownership(obj: Any, raise_if_false: bool = True) -> bool:
"""Meant to be used in `pre_update` hooks on models to enforce ownership
    Admins have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
"""
if not obj:
return False
security_exception = SupersetSecurityException(
SupersetError(
error_type=SupersetErrorType.MISSING_OWNERSHIP_ERROR,
message="You don't have the rights to alter [{}]".format(obj),
level=ErrorLevel.ERROR,
)
)
if g.user.is_anonymous:
if raise_if_false:
raise security_exception
return False
if is_user_admin():
return True
scoped_session = db.create_scoped_session()
orig_obj = scoped_session.query(obj.__class__).filter_by(id=obj.id).first()
# Making a list of owners that works across ORM models
owners: List[User] = []
if hasattr(orig_obj, "owners"):
owners += orig_obj.owners
if hasattr(orig_obj, "owner"):
owners += [orig_obj.owner]
if hasattr(orig_obj, "created_by"):
owners += [orig_obj.created_by]
owner_names = [o.username for o in owners if o]
if g.user and hasattr(g.user, "username") and g.user.username in owner_names:
return True
if raise_if_false:
raise security_exception
return False
def bind_field(
_: Any, form: DynamicForm, unbound_field: UnboundField, options: Dict[Any, Any]
) -> Field:
"""
Customize how fields are bound by stripping all whitespace.
:param form: The form
:param unbound_field: The unbound field
:param options: The field options
:returns: The bound field
"""
filters = unbound_field.kwargs.get("filters", [])
filters.append(lambda x: x.strip() if isinstance(x, str) else x)
return unbound_field.bind(form=form, filters=filters, **options)
FlaskForm.Meta.bind_field = bind_field
@superset_app.after_request
def apply_http_headers(response: Response) -> Response:
"""Applies the configuration's http headers to all responses"""
# HTTP_HEADERS is deprecated, this provides backwards compatibility
response.headers.extend( # type: ignore
{**config["OVERRIDE_HTTP_HEADERS"], **config["HTTP_HEADERS"]}
)
for k, v in config["DEFAULT_HTTP_HEADERS"].items():
if k not in response.headers:
response.headers[k] = v
return response
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
storage/cache.go
|
package storage
import (
"os"
"github.com/go-redis/redis"
)
// Cache is an interface that allows cache operations to be wrapped and mocked
type Cache interface {
HGet(key, field string) (string, error)
HGetAll(key string) (map[string]string, error)
HSet(key, field string, value interface{}) (bool, error)
SCard(key string) (int64, error)
SIsMember(key string, member interface{}) (bool, error)
SMembers(key string) ([]string, error)
TxPipeline() redis.Pipeliner
Z(score float64, member interface{}) redis.Z
ZRangeByScore(key string, opt redis.ZRangeBy) ([]string, error)
}
// NewCache creates a wrapped Redis client
func NewCache() *Redis {
addr := os.Getenv("REDIS_ADDR")
if addr == "" {
addr = "localhost:6379"
}
opts := &redis.Options{
Addr: addr,
}
cache := &Redis{
Client: redis.NewClient(opts),
}
return cache
}
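// Usage sketch (illustrative, not part of the original file): NewCache reads REDIS_ADDR
// and falls back to localhost:6379 when it is unset, so a caller can construct the
// client without any explicit configuration:
//
//	cache := NewCache()
//	name, err := cache.HGet("user:42", "name")
//
// assuming a reachable Redis instance and that *Redis implements the Cache interface
// methods declared above; the key and field names are placeholders.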
|
[
"\"REDIS_ADDR\""
] |
[] |
[
"REDIS_ADDR"
] |
[]
|
["REDIS_ADDR"]
|
go
| 1 | 0 | |
configx/provider.go
|
package configx
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/ory/x/logrusx"
"github.com/ory/x/jsonschemax"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
"github.com/ory/jsonschema/v3"
"github.com/ory/x/watcherx"
"github.com/inhies/go-bytesize"
"github.com/knadh/koanf/providers/posflag"
"github.com/spf13/pflag"
"github.com/ory/x/stringsx"
"github.com/ory/x/tracing"
"github.com/knadh/koanf"
"github.com/knadh/koanf/parsers/json"
"github.com/pkg/errors"
"github.com/rs/cors"
)
type tuple struct {
Key string
Value interface{}
}
type Provider struct {
l sync.Mutex
*koanf.Koanf
immutables []string
originalContext context.Context
cancelFork context.CancelFunc
schema []byte
flags *pflag.FlagSet
validator *jsonschema.Schema
onChanges []func(watcherx.Event, error)
onValidationError func(k *koanf.Koanf, err error)
excludeFieldsFromTracing []string
tracer *tracing.Tracer
forcedValues []tuple
baseValues []tuple
files []string
skipValidation bool
logger *logrusx.Logger
}
const (
FlagConfig = "config"
Delimiter = "."
)
// RegisterConfigFlag registers the "--config" flag on pflag.FlagSet.
func RegisterConfigFlag(flags *pflag.FlagSet, fallback []string) {
flags.StringSliceP(FlagConfig, "c", fallback, "Config files to load, overwriting in the order specified.")
}
// New creates a new provider instance or errors.
// Configuration values are loaded in the following order:
//
// 1. Defaults from the JSON Schema
// 2. Config files (yaml, yml, toml, json)
// 3. Command line flags
// 4. Environment variables
func New(schema []byte, modifiers ...OptionModifier) (*Provider, error) {
schemaID, comp, err := newCompiler(schema)
if err != nil {
return nil, err
}
validator, err := comp.Compile(schemaID)
if err != nil {
return nil, err
}
l := logrus.New()
l.Out = ioutil.Discard
p := &Provider{
originalContext: context.Background(),
schema: schema,
validator: validator,
onValidationError: func(k *koanf.Koanf, err error) {},
excludeFieldsFromTracing: []string{"dsn", "secret", "password", "key"},
logger: logrusx.New("discarding config logger", "", logrusx.UseLogger(l)),
}
for _, m := range modifiers {
m(p)
}
k, _, cancelFork, err := p.forkKoanf()
if err != nil {
return nil, err
}
p.replaceKoanf(k, cancelFork)
return p, nil
}
func (p *Provider) replaceKoanf(k *koanf.Koanf, cancelFork context.CancelFunc) {
p.l.Lock()
defer p.l.Unlock()
if p.cancelFork != nil {
p.cancelFork()
}
p.Koanf = k
p.cancelFork = cancelFork
}
func (p *Provider) validate(k *koanf.Koanf) error {
if p.skipValidation {
return nil
}
out, err := k.Marshal(json.Parser())
if err != nil {
return errors.WithStack(err)
}
if err := p.validator.Validate(bytes.NewReader(out)); err != nil {
p.onValidationError(k, err)
return err
}
return nil
}
func (p *Provider) forkKoanf() (*koanf.Koanf, context.Context, context.CancelFunc, error) {
fork, cancel := context.WithCancel(p.originalContext)
span, fork := p.startSpan(fork, LoadSpanOpName)
defer span.Finish()
k := koanf.New(Delimiter)
dp, err := NewKoanfSchemaDefaults(p.schema)
if err != nil {
cancel()
return nil, nil, nil, err
}
ep, err := NewKoanfEnv("", p.schema)
if err != nil {
cancel()
return nil, nil, nil, err
}
// Load defaults
if err := k.Load(dp, nil); err != nil {
cancel()
return nil, nil, nil, err
}
for _, t := range p.baseValues {
if err := k.Load(NewKoanfConfmap([]tuple{t}), nil); err != nil {
cancel()
return nil, nil, nil, err
}
}
var paths []string
if p.flags != nil {
p, _ := p.flags.GetStringSlice(FlagConfig)
paths = append(paths, p...)
}
if err := p.addAndWatchConfigFiles(fork, append(p.files, paths...), k); err != nil {
cancel()
return nil, nil, nil, err
}
if p.flags != nil {
if err := k.Load(posflag.Provider(p.flags, ".", k), nil); err != nil {
cancel()
return nil, nil, nil, err
}
}
if err := k.Load(ep, nil); err != nil {
cancel()
return nil, nil, nil, err
}
// Workaround for https://github.com/knadh/koanf/pull/47
for _, t := range p.forcedValues {
if err := k.Load(NewKoanfConfmap([]tuple{t}), nil); err != nil {
cancel()
return nil, nil, nil, err
}
}
if err := p.validate(k); err != nil {
cancel()
return nil, nil, nil, err
}
p.traceConfig(fork, k, LoadSpanOpName)
return k, fork, cancel, nil
}
// SetTracer sets the tracer and sends a snapshot of the configuration to it.
func (p *Provider) SetTracer(ctx context.Context, t *tracing.Tracer) {
p.tracer = t
p.traceConfig(ctx, p.Koanf, SnapshotSpanOpName)
}
func (p *Provider) startSpan(ctx context.Context, opName string) (opentracing.Span, context.Context) {
tracer := opentracing.GlobalTracer()
if p.tracer != nil && p.tracer.Tracer() != nil {
tracer = p.tracer.Tracer()
}
return opentracing.StartSpanFromContextWithTracer(ctx, tracer, opName)
}
func (p *Provider) traceConfig(ctx context.Context, k *koanf.Koanf, opName string) {
span, ctx := p.startSpan(ctx, opName)
defer span.Finish()
span.SetTag("component", "github.com/ory/x/configx")
fields := make([]log.Field, 0, len(k.Keys()))
for _, key := range k.Keys() {
var redact bool
for _, e := range p.excludeFieldsFromTracing {
if strings.Contains(key, e) {
redact = true
}
}
if redact {
fields = append(fields, log.Object(key, "[redacted]"))
} else {
fields = append(fields, log.Object(key, k.Get(key)))
}
}
span.LogFields(fields...)
}
func (p *Provider) runOnChanges(e watcherx.Event, err error) {
for k := range p.onChanges {
p.onChanges[k](e, err)
}
}
func (p *Provider) addAndWatchConfigFiles(ctx context.Context, paths []string, k *koanf.Koanf) error {
p.logger.WithField("files", paths).Debug("Adding config files.")
watchForFileChanges := func(c watcherx.EventChannel) {
// Channel is closed automatically on ctx.Done() because of fp.WatchChannel()
for e := range c {
switch et := e.(type) {
case *watcherx.ErrorEvent:
p.runOnChanges(e, et)
continue
default:
nk, _, cancel, err := p.forkKoanf()
if err != nil {
p.runOnChanges(e, err)
continue
}
var cancelReload bool
for _, key := range p.immutables {
if !reflect.DeepEqual(k.Get(key), nk.Get(key)) {
cancel()
cancelReload = true
p.runOnChanges(e, NewImmutableError(key, fmt.Sprintf("%v", k.Get(key)), fmt.Sprintf("%v", nk.Get(key))))
break
}
}
if cancelReload {
continue
}
p.replaceKoanf(nk, cancel)
p.runOnChanges(e, nil)
}
}
}
for _, path := range paths {
fp, err := NewKoanfFile(ctx, path)
if err != nil {
return err
}
if err := k.Load(fp, nil); err != nil {
return err
}
c := make(watcherx.EventChannel)
if _, err := fp.WatchChannel(c); err != nil {
return err
}
go watchForFileChanges(c)
}
return nil
}
func (p *Provider) Set(key string, value interface{}) error {
p.forcedValues = append(p.forcedValues, tuple{Key: key, Value: value})
k, _, cancel, err := p.forkKoanf()
if err != nil {
return err
}
p.replaceKoanf(k, cancel)
return nil
}
func (p *Provider) BoolF(key string, fallback bool) bool {
if !p.Koanf.Exists(key) {
return fallback
}
return p.Bool(key)
}
func (p *Provider) StringF(key string, fallback string) string {
if !p.Koanf.Exists(key) {
return fallback
}
return p.String(key)
}
func (p *Provider) StringsF(key string, fallback []string) (val []string) {
if !p.Koanf.Exists(key) {
return fallback
}
return p.Strings(key)
}
func (p *Provider) IntF(key string, fallback int) (val int) {
if !p.Koanf.Exists(key) {
return fallback
}
return p.Int(key)
}
func (p *Provider) Float64F(key string, fallback float64) (val float64) {
if !p.Koanf.Exists(key) {
return fallback
}
return p.Float64(key)
}
func (p *Provider) DurationF(key string, fallback time.Duration) (val time.Duration) {
if !p.Koanf.Exists(key) {
return fallback
}
return p.Duration(key)
}
func (p *Provider) ByteSizeF(key string, fallback bytesize.ByteSize) bytesize.ByteSize {
if !p.Koanf.Exists(key) {
return fallback
}
switch v := p.Koanf.Get(key).(type) {
case string:
// this type usually comes from user input
dec, err := bytesize.Parse(v)
if err != nil {
p.logger.WithField("key", key).WithField("raw_value", v).WithError(err).Warnf("error parsing byte size value, using fallback of %s", fallback)
return fallback
}
return dec
case float64:
// this type comes from json.Unmarshal
return bytesize.ByteSize(v)
case bytesize.ByteSize:
return v
default:
p.logger.WithField("key", key).WithField("raw_type", fmt.Sprintf("%T", v)).WithField("raw_value", fmt.Sprintf("%+v", v)).Errorf("error converting byte size value because of unknown type, using fallback of %s", fallback)
return fallback
}
}
func (p *Provider) GetF(key string, fallback interface{}) (val interface{}) {
if !p.Exists(key) {
return fallback
}
return p.Get(key)
}
func (p *Provider) CORS(prefix string, defaults cors.Options) (cors.Options, bool) {
if len(prefix) > 0 {
prefix = strings.TrimRight(prefix, ".") + "."
}
return cors.Options{
AllowedOrigins: p.StringsF(prefix+"cors.allowed_origins", defaults.AllowedOrigins),
AllowedMethods: p.StringsF(prefix+"cors.allowed_methods", defaults.AllowedMethods),
AllowedHeaders: p.StringsF(prefix+"cors.allowed_headers", defaults.AllowedHeaders),
ExposedHeaders: p.StringsF(prefix+"cors.exposed_headers", defaults.ExposedHeaders),
AllowCredentials: p.BoolF(prefix+"cors.allow_credentials", defaults.AllowCredentials),
OptionsPassthrough: p.BoolF(prefix+"cors.options_passthrough", defaults.OptionsPassthrough),
MaxAge: p.IntF(prefix+"cors.max_age", defaults.MaxAge),
Debug: p.BoolF(prefix+"cors.debug", defaults.Debug),
}, p.Bool(prefix + "cors.enabled")
}
func (p *Provider) TracingConfig(serviceName string) *tracing.Config {
return &tracing.Config{
ServiceName: p.StringF("tracing.service_name", serviceName),
Provider: p.String("tracing.provider"),
Jaeger: &tracing.JaegerConfig{
LocalAgentHostPort: p.String("tracing.providers.jaeger.local_agent_address"),
SamplerType: p.StringF("tracing.providers.jaeger.sampling.type", "const"),
SamplerValue: p.Float64F("tracing.providers.jaeger.sampling.value", float64(1)),
SamplerServerURL: p.String("tracing.providers.jaeger.sampling.server_url"),
Propagation: stringsx.Coalesce(
os.Getenv("JAEGER_PROPAGATION"),
p.String("tracing.providers.jaeger.propagation"),
),
},
Zipkin: &tracing.ZipkinConfig{
ServerURL: p.String("tracing.providers.zipkin.server_url"),
},
}
}
func (p *Provider) RequestURIF(path string, fallback *url.URL) *url.URL {
switch t := p.Get(path).(type) {
case *url.URL:
return t
case url.URL:
return &t
case string:
if parsed, err := url.ParseRequestURI(t); err == nil {
return parsed
}
}
return fallback
}
func (p *Provider) URIF(path string, fallback *url.URL) *url.URL {
switch t := p.Get(path).(type) {
case *url.URL:
return t
case url.URL:
return &t
case string:
if parsed, err := url.Parse(t); err == nil {
return parsed
}
}
return fallback
}
// PrintHumanReadableValidationErrors prints human readable validation errors. Duh.
func (p *Provider) PrintHumanReadableValidationErrors(w io.Writer, err error) {
p.printHumanReadableValidationErrors(p.Koanf, w, err)
}
func (p *Provider) printHumanReadableValidationErrors(k *koanf.Koanf, w io.Writer, err error) {
if err == nil {
return
}
_, _ = fmt.Fprintln(os.Stderr, "")
conf, innerErr := k.Marshal(json.Parser())
if innerErr != nil {
_, _ = fmt.Fprintf(w, "Unable to unmarshal configuration: %+v", innerErr)
}
jsonschemax.FormatValidationErrorForCLI(w, conf, err)
}
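// Usage sketch (illustrative, not part of the original file): the precedence documented
// on New -- schema defaults, then config files, then command line flags, then environment
// variables -- means an environment variable overrides the same key supplied in a YAML
// file. A minimal caller might look like
//
//	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
//	RegisterConfigFlag(flags, []string{"config.yaml"})
//	p, err := New(schemaJSON, WithFlags(flags))
//
// where schemaJSON holds the JSON Schema bytes and WithFlags is assumed to be one of the
// OptionModifier helpers defined elsewhere in this package.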
|
[
"\"JAEGER_PROPAGATION\""
] |
[] |
[
"JAEGER_PROPAGATION"
] |
[]
|
["JAEGER_PROPAGATION"]
|
go
| 1 | 0 | |
channel_test.go
|
package test
import (
"sync"
"testing"
"github.com/Workiva/go-datastructures/queue"
)
func BenchmarkChannel(b *testing.B) {
ch := make(chan interface{}, 1)
b.ResetTimer()
go func() {
for i := 0; i < b.N; i++ {
<-ch
}
}()
for i := 0; i < b.N; i++ {
ch <- `a`
}
}
func BenchmarkRingBuffer(b *testing.B) {
q := queue.NewRingBuffer(1)
b.ResetTimer()
go func() {
for i := 0; i < b.N; i++ {
q.Get()
}
}()
for i := 0; i < b.N; i++ {
q.Put(`a`)
}
}
func BenchmarkChannelReadContention(b *testing.B) {
ch := make(chan interface{}, 100)
var wg sync.WaitGroup
wg.Add(1000)
b.ResetTimer()
go func() {
for i := 0; i < b.N; i++ {
ch <- `a`
}
}()
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N/1000; i++ {
<-ch
}
wg.Done()
}()
}
wg.Wait()
}
func BenchmarkRingBufferReadContention(b *testing.B) {
q := queue.NewRingBuffer(100)
var wg sync.WaitGroup
wg.Add(1000)
b.ResetTimer()
go func() {
for i := 0; i < b.N; i++ {
q.Put(`a`)
}
}()
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N/1000; i++ {
q.Get()
}
wg.Done()
}()
}
wg.Wait()
}
func BenchmarkChannelContention(b *testing.B) {
ch := make(chan interface{}, 100)
var wg sync.WaitGroup
wg.Add(1000)
b.ResetTimer()
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N; i++ {
ch <- `a`
}
}()
}
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N; i++ {
<-ch
}
wg.Done()
}()
}
wg.Wait()
}
func BenchmarkRingBufferContention(b *testing.B) {
q := queue.NewRingBuffer(100)
var wg sync.WaitGroup
wg.Add(1000)
b.ResetTimer()
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N; i++ {
q.Put(`a`)
}
}()
}
for i := 0; i < 1000; i++ {
go func() {
for i := 0; i < b.N; i++ {
q.Get()
}
wg.Done()
}()
}
wg.Wait()
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/python/clawutil/make_all.py
|
"""
Performs 'make all' in each subdirectory to create sample results for the
gallery or to perform regression tests against results in the gallery,
or elsewhere.
Sends output and errors to separate files to simplify looking for errors.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
from six.moves import input
# Determine directory:
try:
CLAW = os.environ['CLAW']
except:
raise Exception("Need to set CLAW environment variable")
def list_examples(examples_dir):
"""
Searches all subdirectories of examples_dir for examples and prints out a list.
"""
import os
current_dir = os.getcwd()
os.chdir(examples_dir)
dirlist = []
applist = []
    # Traverse directories depth-first (topdown=False) to ensure e.g. that code in
# amrclaw/examples/acoustics_2d_radial/1drad
# is run before code in
# amrclaw/examples/acoustics_2d_radial
for (dirpath, subdirs, files) in os.walk('.',topdown=False):
# By convention we assume that a setrun.py file indicates this is an
# example directory.
files = os.listdir(os.path.abspath(dirpath))
if 'setrun.py' in files:
dirlist.append(os.path.abspath(dirpath))
os.chdir(current_dir)
return dirlist
def make_all(examples_dir = '.',make_clean_first=False, env=None):
import os,sys
if env is None:
my_env = os.environ
else:
my_env = env
examples_dir = os.path.abspath(examples_dir)
if not os.path.isdir(examples_dir):
raise Exception("Directory not found: %s" % examples_dir)
current_dir = os.getcwd()
dir_list = list_examples(examples_dir)
print("Found the following example subdirectories:")
for d in dir_list:
print(" ", d)
print("Will run code and make plots in the above subdirectories of ")
print(" ", examples_dir)
ans = input("Ok? ")
if ans.lower() not in ['y','yes']:
print("Aborting.")
sys.exit()
fname_output = 'make_all_output.txt'
fout = open(fname_output, 'w')
fout.write("ALL OUTPUT FROM RUNNING EXAMPLES\n\n")
fname_errors = 'make_all_errors.txt'
ferr = open(fname_errors, 'w')
ferr.write("ALL ERRORS FROM RUNNING EXAMPLES\n\n")
os.chdir(examples_dir)
goodlist_run = []
badlist_run = []
import subprocess
for directory in dir_list:
fout.write("\n=============================================\n")
fout.write(directory)
fout.write("\n=============================================\n")
ferr.write("\n=============================================\n")
ferr.write(directory)
ferr.write("\n=============================================\n")
os.chdir(directory)
# flush I/O buffers:
fout.flush()
ferr.flush()
if make_clean_first:
# Run 'make clean':
job = subprocess.Popen(['make','clean'], \
stdout=fout,stderr=ferr)
return_code = job.wait()
# Run 'make all':
job = subprocess.Popen(['make','all'], \
stdout=fout,stderr=ferr,env=my_env)
return_code = job.wait()
if return_code == 0:
print("Successful run\n")
goodlist_run.append(directory)
else:
print("*** Run errors encountered: see %s\n" % fname_errors)
badlist_run.append(directory)
print('------------------------------------------------------------- ')
print(' ')
print('Ran "make all" and created output and plots in directories:')
if len(goodlist_run) == 0:
print(' None')
else:
for d in goodlist_run:
print(' ',d)
print(' ')
print('Errors encountered in the following directories:')
if len(badlist_run) == 0:
print(' None')
else:
for d in badlist_run:
print(' ',d)
print(' ')
fout.close()
ferr.close()
print('For all output see ', fname_output)
print('For all errors see ', fname_errors)
os.chdir(current_dir)
if __name__=='__main__':
import sys
make_all(*sys.argv[1:])
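# Illustrative usage (not part of the original script): because __main__ forwards
# sys.argv to make_all, the script can be run as
#
#   python make_all.py $CLAW/amrclaw/examples
#
# which is equivalent to calling make_all('$CLAW/amrclaw/examples') from Python; the
# example path is a placeholder for any directory whose subdirectories contain setrun.py.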
|
[] |
[] |
[
"CLAW"
] |
[]
|
["CLAW"]
|
python
| 1 | 0 | |
meethalfway/views.py
|
from django.shortcuts import render, redirect
import csv
import time
import json
import requests
from django.core.urlresolvers import reverse
from django import forms
from . import models
import os
class EnterIDForm(forms.Form):
'''
Short form to enter in a meeting ID. Validate_trip_id function cleans the
ID and checks if it is in the database; if it isn't, returns None.
'''
trip_id = forms.CharField()
def validate_trip_id(self):
cleaned_id = self.cleaned_data
if models.Meeting.objects.filter(trip_id = cleaned_id['trip_id']):
return cleaned_id['trip_id']
else:
return None
class AddAddress(forms.ModelForm):
'''
ModelForm connected to the Address model. Provides field for street, city,
state, and zip code.
'''
class Meta:
model = models.Address
fields = ["street", "city", "state", "zip_code"]
class AddParticipant(forms.ModelForm):
'''
    ModelForm connected to the Participant model. Provides a menu of valid modes
of transport. An address is combined with mode of transit to create a Participant
'''
class Meta:
model = models.Participant
fields = ["transit_mode"]
widgets = {
'transit_mode': forms.Select(),
}
class AddMeeting(forms.ModelForm):
'''
ModelForm connected to the Meeting model. Provides a menu of valid business
types. Created when a participant is added and initialized with empty values for
participant_two and destinations.
'''
class Meta:
model = models.Meeting
fields = ["business_type"]
widgets = {
'business_type': forms.Select()
}
def home(request):
'''
View for home page of MeetHalfway. Contains forms for the first participant to
enter their address, mode of transit, and business type for meeting. Validates
the address provided using a service from USPS. With completed form, creates an
Address, Participant, and Meeting then generates a unique Meeting ID. Also
displays a form to enter in the ID of a previously created meeting. If valid,
either redirects to a page for the second participant to enter information or
displays results if second person's information has already been added. If
invalid, displays error message.
'''
error = None
if request.method == 'POST':
if "participant_one_submit" in request.POST:
address = AddAddress(request.POST)
participant = AddParticipant(request.POST)
meeting = AddMeeting(request.POST)
#USPS api used to validate the entered address
if address.is_valid() and participant.is_valid() and meeting.is_valid():
trip_id, suggestion = participant_one(request, address, participant, meeting)
if trip_id != None:
return redirect('meethalfway:new_meeting', trip_id)
else:
#Returns error message if address invalid
return redirect('meethalfway:address_error1',suggestion)
elif 'enter_trip_id' in request.POST:
trip_id = EnterIDForm(request.POST)
if trip_id.is_valid():
valid_trip_id = trip_id.validate_trip_id()
if valid_trip_id:
meeting = models.Meeting.objects.get(trip_id = valid_trip_id)
if not meeting.participant_two:
#Redirects to form for participant two if not filled in
return redirect('meethalfway:participant_two', valid_trip_id)
else:
#Redirects to results if information is already filled in
return redirect('meethalfway:results', valid_trip_id)
else:
#Error if invalid trip id is entered
error = True
address = AddAddress()
participant = AddParticipant()
meeting = AddMeeting()
trip_id = EnterIDForm()
c = {
'forms': [address, participant, meeting],
'trip_id_form': trip_id,
'not_found' : error
}
return render(request, 'halfwayapp/home.html', c)
def new_meeting(request, trip_id):
'''
Displays the trip id generated after participant one enters information.
'''
return render(request,'halfwayapp/response.html', {'uniq' : trip_id})
def participant_one(request, address, participant, meeting):
'''
Function to verify the address of the first participant and to create and save
the Address, Participant, and Meeting objects for the new meeting.
'''
address_obj = address.save()
verify, suggestion,verified_address_dict = address_obj.verify_address()
if verify:
verified_address = models.Address(street = verified_address_dict['address'], city = verified_address_dict['city'], \
state = verified_address_dict['state'], zip_code = verified_address_dict['zip5'])
verified_address.save()
address_obj.delete()
part_obj = participant.save()
part_obj.starting_location = verified_address
part_obj.save()
meeting_obj = meeting.save()
meeting_obj.participant_one = part_obj
meeting_obj.trip_id = meeting_obj.random_words()
meeting_obj.save()
else:
#Returns error message if address invalid
return None, suggestion
return meeting_obj.trip_id, None
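# Note: Address.verify_address() (defined on the Address model) is expected to
# return a (verified, suggestion, verified_address_dict) triple, where the dict
# carries the USPS-normalized 'address', 'city', 'state', and 'zip5' fields
# unpacked above.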
def participant_two(request, trip_id):
'''
Handles information passed to create the second participant in a Meeting. If
a second participant has already been added, redirects to results. Otherwise,
creates a participant_two and calls get_destinations function.
'''
if request.method == 'POST':
meeting = models.Meeting.objects.get(trip_id = trip_id)
if meeting.participant_two:
#If second participant already added, redirects to results
return redirect('meethalfway:results', trip_id)
address = AddAddress(request.POST)
participant = AddParticipant(request.POST)
if address.is_valid() and participant.is_valid():
address_obj = address.save()
#USPS API used to validate the entered address
verify, suggestion, verified_address_dict = address_obj.verify_address()
if verify:
verified_address = models.Address(street = verified_address_dict['address'], city = verified_address_dict['city'], \
state = verified_address_dict['state'], zip_code = verified_address_dict['zip5'])
verified_address.save()
address_obj.delete()
part_obj = participant.save()
part_obj.starting_location = verified_address
part_obj.save()
meeting.participant_two = part_obj
meeting.save()
meeting.get_destinations()
else:
return redirect('meethalfway:address_error2', trip_id, suggestion)
return redirect('meethalfway:results', trip_id)
address = AddAddress()
participant = AddParticipant()
c = {
'forms': [address, participant],
'uniq': trip_id
}
return render(request, "halfwayapp/person2.html", c)
def results(request, trip_id):
'''
When called, finds the Meeting object associated with the trip id. Shows the
destination results if any were found and displays error message otherwise.
'''
meeting = models.Meeting.objects.get(trip_id = trip_id)
d = meeting.destinations.order_by('score')
if not d.exists():
return redirect('meethalfway:no_results')
destinations = d.reverse()
best_dest = destinations[:1].get().latlng
lat = best_dest.split(",")[0]
lng = best_dest.split(",")[1]
c = {
'destinations': destinations,
'trip_id': trip_id,
'lat': lat,
'lng': lng,
'goog_js_api_key': os.environ.get('GOOG_JS_API_KEY')
}
return render(request, "halfwayapp/results.html", c)
def about(request):
'''
Displays 'about' page with information about site.
'''
return render(request, "halfwayapp/about.html")
def no_results(request):
'''
Displays an error page if no potential destinations were found.
'''
return render(request, "halfwayapp/no_results.html")
def contact(request):
'''
Displays 'contact' page.
'''
return render(request, "halfwayapp/contact.html")
def address_error1(request, suggestion):
'''
Displays error message if first participant's address is invalid.
'''
c = {
'suggestion': suggestion
}
return render(request, "halfwayapp/address_error1.html", c)
def address_error2(request, trip_id, suggestion):
'''
Displays error message if second participant's address is invalid.
'''
c = {
'trip_id': trip_id,
'suggestion': suggestion
}
return render(request, "halfwayapp/address_error2.html", c)
|
[] |
[] |
[
"GOOG_JS_API_KEY"
] |
[]
|
["GOOG_JS_API_KEY"]
|
python
| 1 | 0 | |
src/github.com/cloudfoundry-incubator/bosh-fuzz-tests/vendor/github.com/cloudfoundry-incubator/bosh-load-tests/action/prepare_config_server.go
|
package action
import (
"encoding/json"
"net/url"
"errors"
"fmt"
"os"
"strings"
bltclirunner "github.com/cloudfoundry-incubator/bosh-load-tests/action/clirunner"
)
type prepareConfigServer struct {
directorInfo DirectorInfo
uaaRunner bltclirunner.Runner
}
func NewPrepareConfigServer(
directorInfo DirectorInfo,
uaaRunner bltclirunner.Runner,
) *prepareConfigServer {
return &prepareConfigServer{
directorInfo: directorInfo,
uaaRunner: uaaRunner,
}
}
func (p *prepareConfigServer) Execute() error {
// Setup UAA
targetURL, err := url.Parse(p.directorInfo.URL)
if nil != err {
return err
}
urlWithoutPort := strings.Split(targetURL.Host, ":")[0]
targetURL.Host = fmt.Sprintf("%s:8443", urlWithoutPort)
targetURL.Scheme = "https"
target := targetURL.String()
if err := p.uaaRunner.RunWithArgs("target", target, "--skip-ssl-validation"); nil != err {
return err
}
if err := p.uaaRunner.RunWithArgs("token", "client", "get", "director_config_server", "-s", os.Getenv("CONFIG_SERVER_PASSWORD")); nil != err {
return err
}
if err := p.setValue("/num_instances", 5); nil != err {
return err
}
if err := p.setValue("/prop3_value", "this is the value of prop 3!"); nil != err {
return err
}
return nil
}
func (p *prepareConfigServer) setValue(key string, value interface{}) error {
dataStruct := struct {
Name string `json:"name"`
Value interface{} `json:"value"`
}{key, value}
data, err := json.Marshal(dataStruct)
if nil != err {
return err
}
if directorIP, exist := os.LookupEnv("BOSH_DIRECTOR_IP"); exist {
if err := p.uaaRunner.RunWithArgs("curl", "--insecure", "--request", "PUT", "--header", "Content-Type:Application/JSON", "--data", string(data), fmt.Sprintf("https://%s:8080/v1/data", directorIP)); nil != err {
return err
}
} else {
return errors.New("could not find environment: BOSH_DIRECTOR_IP")
}
return nil
}
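// For illustration: setValue("/num_instances", 5) marshals the body
// {"name":"/num_instances","value":5} and PUTs it to
// https://<BOSH_DIRECTOR_IP>:8080/v1/data through the UAA CLI runner's curl command.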
|
[
"\"CONFIG_SERVER_PASSWORD\""
] |
[] |
[
"CONFIG_SERVER_PASSWORD"
] |
[]
|
["CONFIG_SERVER_PASSWORD"]
|
go
| 1 | 0 | |
app/clean_test_app.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 18:54:48 2020
@author: dylanroyston
"""
# -*- coding: utf-8 -*-
# import packages
#import dash_player
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import psycopg2
import os
import pandas as pd
import numpy as np
import plotly
import plotly.express as px
import plotly.graph_objects as go
import librosa
import librosa.display as ld
import IPython.display as ipd
import pylab as pl
import boto3
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from matplotlib import cm
#from colorspacious import cspace_converter
#from collections import OrderedDict
######
# connect to PSQL and retrieve
psql_usr = os.environ.get('PSQL_USR')
psql_pw = os.environ.get('PSQL_PW')
conn = psycopg2.connect(host = 'ec2-13-58-251-142.us-east-2.compute.amazonaws.com',
dbname = 'spectralize',
user='postgres',
password=psql_pw)
##### read out metadata
metadata = conn.cursor()
metadata.execute("SELECT * FROM clean_metadata WHERE false;")
cols = set(metadata.fetchall())
metadata.execute("SELECT * FROM clean_metadata;")
md = set(metadata.fetchall())
cols = ["s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year"]
tag_df = pd.DataFrame(data=md, columns=cols)
##### S3 access for playing audio files
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
bucket = s3.Bucket(s3_bucket)
# placeholders for callback initialization
standin_fp = '/home/dylanroyston/Documents/GIT/spectralize/app/hello.wav'
audio_sd_file = standin_fp
#audio_rawfile, new_sr = librosa.load(standin_fp, sr=None)
standin_data = np.array([[0,0],[0,0]])
standin_df = pd.DataFrame(standin_data, columns=['x','y'])
#audio_fig = px.line(standin_df, x='x', y='y', title='audio data', render_mode='webgl')
spec_fig = px.imshow(standin_df)
def load_audio_data(selected_row):
# read out audio data
#curr_song_id = tag_df.iloc[selected_row]['song_id']
curr_song_id = selected_row
# audiodata = conn.cursor()
# qstring = 'SELECT intensity FROM clean_audio WHERE song_id=' + str(curr_song_id)
# audiodata.execute(qstring)
# ad = np.array(audiodata.fetchall())
# audio_df = pd.DataFrame(data=ad, columns=['I'])
# audio_fig = px.line(audio_df, x=audio_df.index, y='I', title='audio data', render_mode='webgl')
# audio_fig.update_layout(
# height=250,
# margin_r=0,
# margin_l=0,
# margin_t=0,
# yaxis_title='',
# yaxis_fixedrange=True)
s3_key = tag_df.iloc[curr_song_id]['s3_key']
#this_row = tag_df.loc[tag_df['song_id'] == curr_song_id]
#s3_key = tag_df.iloc[this_row]['s3_key']
ext = s3_key[-4:]
audio_sd_file = '/home/dylanroyston/Documents/GIT/spectralize/app/audio_file' + ext
bucket.download_file(s3_key, audio_sd_file)
#audio_rawfile = librosa.load(audio_sd_file)
return audio_sd_file#, audio_fig
def load_spec_data(selected_row):
curr_song_id = selected_row
specdata = conn.cursor()
qstring = 'SELECT * FROM clean_spec WHERE song_id=' + str(curr_song_id)
specdata.execute(qstring)
sd = np.array(specdata.fetchall())
spec_df = pd.DataFrame(data=sd)
#currtitle = tag_df.iloc[curr_song_id]['title']
#currdur = tag_df.iloc[curr_song_id]['duration']
# numpts = len(sd)
# interval = float(currdur) / numpts
# timeline = np.linspace(0,float(currdur),numpts)
# rt = timeline.round(0)
trim_sd = spec_df.iloc[:,2:]
spec_fig = px.imshow(trim_sd.transpose(),
origin='lower',
#title=currtitle,
#x=timeline
)
spec_fig.update_layout(
height=250,
margin_r=0,
margin_l=0,
margin_t=0,
yaxis_title='Frequency',
xaxis_title='Time',
#colorbar.title='power',
yaxis_fixedrange=True,
#x=str(rt)
#title=currtitle
)
return spec_fig
#####
# initialize Dash app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
# header
html.H1(children='Metadata'),
# metadata table
dash_table.DataTable(
id = 'metadata_table',
data=tag_df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in tag_df.columns],
style_cell={
'overflowX': 'auto',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 10,
'row_selectable': 'single',
'font_family': 'Arial',
'font_size': '1.5rem',
'padding': '.5rem',
'backgroundColor': '#f4f4f2'
},
style_cell_conditional=[
{'textAlign': 'center'}
],
style_header={
'backgroundColor':'#f4f4f2',
'fontWeight': 'bold',
'overflowX': 'auto',
'textOverflow': 'ellipsis'
},
style_table={
'maxHeight':'500px',
'overflowX': 'scroll'
},
tooltip_data=[
{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
} for row in tag_df.to_dict('rows')
],
tooltip_duration=None,
style_as_list_view=True,
),# end table
# load audio button
html.Br(),
html.Div(
[
dcc.Input(id='input_songnum', value='input song number', type='number'),
html.Button('Load audio',
id='submit-val',
style={'display': 'inline-block'},
n_clicks=0),
html.Div(id='song_input')
],
),
html.Br(),
# html.Audio(id="player", src=audio_sd_file, controls=True, style={
# "width": "100%"
# }),
# dash_player.DashPlayer(
# id='player',
# url='audio_sd_file',
# controls=True
# ),
html.Br(),
#dcc.Graph(id='waveform', figure=audio_fig),
html.Br(),
dcc.Graph(id='spect', figure=spec_fig)
])
##### finish Dash layout
##### callbacks
# load-audio button control
# @app.callback(
# Output('input_songnum', 'value'),
# [Input('submit-val', 'n_clicks')]
# )
# def retrieve_audio(value):
# return load_audio_data(value)
# @app.callback(
# Output('waveform', 'figure'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_A_figure(submit_val):
# audio_fig = load_audio_data(submit_val)
# return audio_fig
## update audio player
# @app.callback(
# Output('player', 'src'),
# [Input('submit-val', 'n_clicks')]
# )
# def update_player(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# return audio_sd_file
## update spect figure on button click
@app.callback(
Output('spect', 'figure'),
[Input('submit-val', 'n_clicks'),
Input('input_songnum', 'value')]
)
def update_S_figure(n_clicks, value):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'submit-val' in changed_id:
spec_fig = load_spec_data(value)
return spec_fig
## combined audiofile/spec update
# @app.callback(
# [Output('player', 'src'),
# Output('spect', 'figure')],
# [Input('submit-val', 'n_clicks')]
# )
# def update_figures(submit_val):
# audio_sd_file = load_audio_data(submit_val)
# spec_fig = load_spec_data(submit_val)
# return audio_sd_file, spec_fig
# @app.callback(
# Output('metadata_table', 'derived_virtual_selected_rows'),
# [Input('submit-val', 'n_clicks'),
# State('metadata_table', 'derived_virtual_selected_rows')]
# )
# def update_audio(n_clicks, derived_virtual_selected_rows):
# if derived_virtual_selected_rows is None:
# derived_virtual_selected_rows = []
# return load_audio_data(derived_virtual_selected_rows)
if __name__ == '__main__':
#app.run_server(debug=True, port=8050, host='127.0.0.1')
app.run_server(debug=True, port=8050, host='127.0.0.1')
#app.run_server(debug=True, port=80, host='ec2-18-224-114-72.us-east-2.compute.amazonaws.com')
|
[] |
[] |
[
"PSQL_USR",
"PSQL_PW"
] |
[]
|
["PSQL_USR", "PSQL_PW"]
|
python
| 2 | 0 | |
sentence_transformers/SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type
from zipfile import ZipFile
import sys
import numpy as np
import transformers
import torch
from numpy import ndarray
from torch import nn, Tensor
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from tqdm import tqdm, trange
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from . import __version__
class SentenceTransformer(nn.Sequential):
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict(
[(str(idx), module) for idx, module in enumerate(modules)])
if model_name_or_path is not None and model_name_or_path != "":
logging.info("Load pretrained DialogTransformer: {}".format(
model_name_or_path))
if '/' not in model_name_or_path and '\\' not in model_name_or_path and not os.path.isdir(model_name_or_path):
logging.info("Did not find a / or \\ in the name. Assume to download model from server")
model_name_or_path = __DOWNLOAD_SERVER__ + model_name_or_path + '.zip'
if model_name_or_path.startswith('http://') or model_name_or_path.startswith('https://'):
model_url = model_name_or_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250]
# print('===================')
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
if torch_cache_home.startswith(
'C:\\Users\\something678/.cache\\torch'):
torch_cache_home = torch_cache_home.replace(
'C:\\Users\\something678/.cache\\torch',
('G:\\KnowledgeBaseData'
'\\sentenceTransformers_datasets'
'\\downloaded_saved_model'))
elif torch_cache_home.startswith(
'/home/something678/.cache/torch'):
torch_cache_home = torch_cache_home.replace(
'/home/something678/.cache/torch',
('/media/Data1/something678/sentence-transformers-master'
'/my_downloaded_saved_model'))
# print('=================== didnt enter exception')
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(default_cache_path, folder_name)
os.makedirs(model_path, exist_ok=True)
if not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
try:
zip_save_path = os.path.join(model_path, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path)
except Exception as e:
shutil.rmtree(model_path)
raise e
else:
model_path = model_name_or_path
#### Load from disk
if model_path is not None:
logging.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
# the modules are bert, LSTM and so on
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Use pytorch device: {}".format(device))
self.device = torch.device(device)
self.to(device)
def encode(self, sentences: List[str], batch_size: int = 8, show_progress_bar: bool = None) -> List[ndarray]:
"""
:param sentences:
the sentences to embed
:param batch_size:
the batch size used for the computation
:param show_progress_bar:
Output a progress bar when encoding sentences
:return:
a list with ndarrays of the embeddings for each sentence
"""
if show_progress_bar is None:
show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)
all_embeddings = []
length_sorted_idx = np.argsort([len(sen) for sen in sentences])
iterator = range(0, len(sentences), batch_size)
if show_progress_bar:
iterator = tqdm(iterator, desc="Batches")
for batch_idx in iterator:
batch_tokens = []
batch_start = batch_idx
batch_end = min(batch_start + batch_size, len(sentences))
longest_seq = 0
for idx in length_sorted_idx[batch_start: batch_end]:
sentence = sentences[idx]
tokens = self.tokenize(sentence)
longest_seq = max(longest_seq, len(tokens))
batch_tokens.append(tokens)
features = {}
for text in batch_tokens:
sentence_features = self.get_sentence_features(text, longest_seq)
for feature_name in sentence_features:
if feature_name not in features:
features[feature_name] = []
features[feature_name].append(sentence_features[feature_name])
for feature_name in features:
features[feature_name] = torch.tensor(np.asarray(features[feature_name])).to(self.device)
with torch.no_grad():
embeddings = self.forward(features)
embeddings = embeddings['sentence_embedding'].to('cpu').numpy()
all_embeddings.extend(embeddings)
reverting_order = np.argsort(length_sorted_idx)
all_embeddings = [all_embeddings[idx] for idx in reverting_order]
return all_embeddings
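# Illustrative usage of encode() (a sketch; the model name is a placeholder):
#   model = SentenceTransformer('some-pretrained-model')
#   embeddings = model.encode(['first sentence', 'second sentence'], batch_size=8)
# 'embeddings' is a list of numpy arrays, one vector per input sentence,
# returned in the same order as the inputs.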
def get_max_seq_length(self):
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text):
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
return self._last_module().get_sentence_embedding_dimension()
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
logging.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Batch sizes vary among the batches.
The list of two-sentence pairs is batched so that
it can be fed to BERT.
In other words, it converts instances into batches.
The dataloader has a default collate_fn, i.e. each batch is a list,
where [0] is feature[0], [1] is feature[1], etc.; see collate_fn in
dataloader.py for detailed usage.
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0][0])
labels = []
paired_texts = [[] for _ in range(num_texts)]
max_seq_len = [0] * num_texts
for tokens, label in batch:
labels.append(label)
for i in range(num_texts):
paired_texts[i].append(tokens[i])
max_seq_len[i] = max(max_seq_len[i], len(tokens[i]))
features = []
for idx in range(num_texts):
max_len = max_seq_len[idx]
feature_lists = {}
for text in paired_texts[idx]:
sentence_features = self.get_sentence_features(text, max_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.tensor(np.asarray(feature_lists[feature_name]))
features.append(feature_lists)
return {'features': features, 'labels': torch.stack(labels)}
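# For a batch of N examples of the form (tokens_per_text, label), the dict
# returned above holds one feature dict per text position (e.g. two for
# sentence pairs) plus a stacked tensor of the N labels.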
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
fp16: bool = False,
fp16_opt_level: str = 'O1',
local_rank: int = -1
):
"""
:param weight_decay:
:param scheduler:
:param warmup_steps:
:param optimizer:
:param evaluation_steps:
:param output_path:
:param save_best_model:
:param max_grad_norm:
:param fp16:
:param fp16_opt_level:
:param local_rank:
:param train_objectives:
Tuples of DataLoader and LossConfig
:param evaluator:
:param epochs:
:param steps_per_epoch: Train for x steps in each epoch. If set to None, the length of the dataset will be used
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
if os.listdir(output_path):
raise ValueError("Output directory ({}) already exists and is not empty.".format(
output_path))
dataloaders = [dataloader for dataloader, _ in train_objectives]
'''
Each dataloader corresponds to a model, denoted as the train_objectives here
'''
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
'''
'''
loss_models = [loss for _, loss in train_objectives]
# retrieve the loss_models
device = self.device
for loss_model in loss_models:
loss_model.to(device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
# the smallest dataset determines the steps_per_epoch, that is
# the num_of_batches per epoch
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
# for each epoch
# >>> lambda1 = lambda epoch: epoch // 30
# >>> lambda2 = lambda epoch: 0.95 ** epoch
# >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2])
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
'''
Choose parameters to optimize
'''
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = num_train_steps
if local_rank != -1:
t_total = t_total // torch.distributed.get_world_size()
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=t_total)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
for train_idx in range(len(loss_models)):
model, optimizer = amp.initialize(loss_models[train_idx], optimizers[train_idx], opt_level=fp16_opt_level)
loss_models[train_idx] = model
optimizers[train_idx] = optimizer
global_step = 0
# steps_per_epoch * number_of_loss_models
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
# logging.info("Restart data_iterator")
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = batch_to_device(data, self.device)
loss_value = loss_model(features, labels)
if fp16:
with amp.scale_loss(loss_value, optimizer) as scaled_loss:
# scale the loss_value with amp's loss scaler
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
else:
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(
self, evaluator, output_path, save_best_model, epoch, steps):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(
self, output_path=output_path, epoch=epoch, steps=steps)
if score > self.best_score and save_best_model:
self.save(output_path)
self.best_score = score
def _get_scheduler(
self, optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
# this uses warmup
return transformers.get_constant_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
|
[] |
[] |
[
"XDG_CACHE_HOME",
"TORCH_HOME"
] |
[]
|
["XDG_CACHE_HOME", "TORCH_HOME"]
|
python
| 2 | 0 | |
test/test_utils.py
|
import os
import torch
import unittest
import ocnn
class TestScatter(unittest.TestCase):
def test_scatter_add(self):
devices = ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']
for device in devices:
src = torch.arange(1, 11, device=device).view(2, 5)
idx = torch.tensor([0, 1, 3, 2, 0], device=device)
gt = torch.tensor([[6, 2, 4, 3, 0], [16, 7, 9, 8, 0]], device=device)
output = ocnn.utils.scatter_add(src, idx, dim=1, dim_size=5)
self.assertTrue(torch.equal(output, gt))
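# Worked example for the ground truth above: with idx = [0, 1, 3, 2, 0],
# source columns 0 and 4 are summed into output column 0 (1 + 5 = 6 in the
# first row, 6 + 10 = 16 in the second), columns 1-3 map to output columns
# 1, 3 and 2 respectively, and output column 4 receives nothing, so it stays 0.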
def test_cumsum(self):
data = torch.tensor([[1, 2, 3], [4, 5, 6]])
gt1 = torch.tensor([[1, 3, 6], [4, 9, 15]])
gt2 = torch.tensor([[0, 1, 3, 6], [0, 4, 9, 15]])
gt3 = torch.tensor([[0, 0, 0], [1, 2, 3], [5, 7, 9]])
out1 = ocnn.utils.cumsum(data, dim=1, exclusive=False)
out2 = ocnn.utils.cumsum(data, dim=1, exclusive=True)
out3 = ocnn.utils.cumsum(data, dim=0, exclusive=True)
self.assertTrue(torch.equal(gt1, out1))
self.assertTrue(torch.equal(gt2, out2))
self.assertTrue(torch.equal(gt3, out3))
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
unittest.main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
test.py
|
import os
import torch.utils.data
from config import BATCH_SIZE
from layer import model, dataset
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
testset = dataset.CUB(root=r"./CUB_200_2011", is_train=False, data_len=None)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
shuffle=False, num_workers=0, drop_last=False)
net = model.CNNtoGraph()
test_model="./checkpoints/model.ckpt"
ckpt = torch.load(test_model)
net.load_state_dict(ckpt['net_state_dict'])
net = net.cuda()
net.eval()
test_correct = 0
total = 0
for i, data in enumerate(testloader):
with torch.no_grad():
img, label = data[0].cuda(), data[1].cuda()
batch_size = img.size(0)
gnn_logits, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob, top_n_cdds, part_feats = net(img)
_, concat_predict = torch.max(gnn_logits+concat_logits, 1)
total += batch_size
test_correct += torch.sum(concat_predict.data == label.data)
test_acc = float(test_correct) / total
print('test set acc: {:.3f} total sample: {}'.format(test_acc, total))
print('finishing testing')
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tests/integration/integration_test.go
|
// Copyright 2016-2018, Pulumi Corporation. All rights reserved.
package ints
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/stretchr/testify/assert"
"github.com/pulumi/pulumi/pkg/apitype"
"github.com/pulumi/pulumi/pkg/resource"
"github.com/pulumi/pulumi/pkg/resource/config"
"github.com/pulumi/pulumi/pkg/resource/deploy/providers"
"github.com/pulumi/pulumi/pkg/secrets/cloud"
ptesting "github.com/pulumi/pulumi/pkg/testing"
"github.com/pulumi/pulumi/pkg/testing/integration"
"github.com/pulumi/pulumi/pkg/workspace"
)
// assertPerfBenchmark implements the integration.TestStatsReporter interface, and reports test
// failures when a scenario exceeds the provided threshold.
type assertPerfBenchmark struct {
T *testing.T
MaxPreviewDuration time.Duration
MaxUpdateDuration time.Duration
}
func (t assertPerfBenchmark) ReportCommand(stats integration.TestCommandStats) {
var maxDuration *time.Duration
if strings.HasPrefix(stats.StepName, "pulumi-preview") {
maxDuration = &t.MaxPreviewDuration
}
if strings.HasPrefix(stats.StepName, "pulumi-update") {
maxDuration = &t.MaxUpdateDuration
}
if maxDuration != nil && *maxDuration != 0 {
if stats.ElapsedSeconds < maxDuration.Seconds() {
t.T.Logf(
"Test step %q was under threshold. %.2fs (max %.2fs)",
stats.StepName, stats.ElapsedSeconds, maxDuration.Seconds())
} else {
t.T.Errorf(
"Test step %q took longer than expected. %.2fs vs. max %.2fs",
stats.StepName, stats.ElapsedSeconds, maxDuration.Seconds())
}
}
}
// TestEmptyNodeJS simply tests that we can run an empty NodeJS project.
func TestEmptyNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// TestEmptyPython simply tests that we can run an empty Python project.
func TestEmptyPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "python"),
Dependencies: []string{
path.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
})
}
// TestEmptyGo simply tests that we can run an empty Go project.
func TestEmptyGo(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "go"),
Quick: true,
})
}
// TestEmptyDotNet simply tests that we can run an empty .NET project.
func TestEmptyDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "dotnet"),
Quick: true,
})
}
// Tests emitting many engine events doesn't result in a performance problem.
func TestEngineEventPerf(t *testing.T) {
// Prior to pulumi/pulumi#2303, a preview or update would take ~40s.
// Since then, it should now be down to ~4s, with additional padding,
// since some Travis machines (especially the macOS ones) seem quite slow
// to begin with.
benchmarkEnforcer := &assertPerfBenchmark{
T: t,
MaxPreviewDuration: 8 * time.Second,
MaxUpdateDuration: 8 * time.Second,
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "ee_perf",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ReportStats: benchmarkEnforcer,
// Don't run in parallel since it is sensitive to system resources.
NoParallel: true,
})
}
// TestEngineEvents ensures that the test framework properly records and reads engine events.
func TestEngineEvents(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "single_resource",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure that we have a non-empty list of events.
assert.NotEmpty(t, stackInfo.Events)
// Ensure that we have two "ResourcePre" events: one for the stack and one for our resource.
preEventResourceTypes := []string{}
for _, e := range stackInfo.Events {
if e.ResourcePreEvent != nil {
preEventResourceTypes = append(preEventResourceTypes, e.ResourcePreEvent.Metadata.Type)
}
}
assert.Equal(t, 2, len(preEventResourceTypes))
assert.Contains(t, preEventResourceTypes, "pulumi:pulumi:Stack")
assert.Contains(t, preEventResourceTypes, "pulumi-nodejs:dynamic:Resource")
},
})
}
// TestProjectMain tests out the ability to override the main entrypoint.
func TestProjectMain(t *testing.T) {
test := integration.ProgramTestOptions{
Dir: "project_main",
Dependencies: []string{"@pulumi/pulumi"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Simple runtime validation that just ensures the checkpoint was written and read.
assert.NotNil(t, stackInfo.Deployment)
},
}
integration.ProgramTest(t, &test)
t.Run("Error_AbsolutePath", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_abs")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-abs")
stdout, stderr := e.RunCommandExpectError("pulumi", "up", "--non-interactive", "--skip-preview")
assert.Equal(t, "Updating (main-abs):\n", stdout)
assert.Contains(t, stderr, "project 'main' must be a relative path")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
t.Run("Error_ParentFolder", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_parent")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-parent")
stdout, stderr := e.RunCommandExpectError("pulumi", "up", "--non-interactive", "--skip-preview")
assert.Equal(t, "Updating (main-parent):\n", stdout)
assert.Contains(t, stderr, "project 'main' must be a subfolder")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
}
// TestStackProjectName ensures we can read the Pulumi stack and project name from within the program.
func TestStackProjectName(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_project_name",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// TestStackTagValidation verifies various error scenarios related to stack names and tags.
func TestStackTagValidation(t *testing.T) {
t.Run("Error_StackName", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.RunCommand("git", "init")
e.ImportDirectory("stack_project_name")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
stdout, stderr := e.RunCommandExpectError("pulumi", "stack", "init", "invalid name (spaces, parens, etc.)")
assert.Equal(t, "", stdout)
assert.Contains(t, stderr, "error: could not create stack:")
assert.Contains(t, stderr, "validating stack properties:")
assert.Contains(t, stderr, "stack name may only contain alphanumeric, hyphens, underscores, or periods")
})
t.Run("Error_DescriptionLength", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.RunCommand("git", "init")
e.ImportDirectory("stack_project_name")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
prefix := "lorem ipsum dolor sit amet" // 26
prefix = prefix + prefix + prefix + prefix // 104
prefix = prefix + prefix + prefix + prefix // 416 + the current Pulumi.yaml's description
// Change the contents of the Description property of Pulumi.yaml.
yamlPath := path.Join(e.CWD, "Pulumi.yaml")
err := integration.ReplaceInFile("description: ", "description: "+prefix, yamlPath)
assert.NoError(t, err)
stdout, stderr := e.RunCommandExpectError("pulumi", "stack", "init", "valid-name")
assert.Equal(t, "", stdout)
assert.Contains(t, stderr, "error: could not create stack:")
assert.Contains(t, stderr, "validating stack properties:")
assert.Contains(t, stderr, "stack tag \"pulumi:description\" value is too long (max length 256 characters)")
})
}
func TestRemoveWithResourcesBlocked(t *testing.T) {
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skipf("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
stackName, err := resource.NewUniqueHex("rm-test-", 8, -1)
contract.AssertNoErrorf(err, "resource.NewUniqueHex should not fail when no maximum length is set")
e.ImportDirectory("single_resource")
e.RunCommand("pulumi", "stack", "init", stackName)
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "up", "--non-interactive", "--skip-preview")
_, stderr := e.RunCommandExpectError("pulumi", "stack", "rm", "--yes")
assert.Contains(t, stderr, "--force")
e.RunCommand("pulumi", "destroy", "--skip-preview", "--non-interactive", "--yes")
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// TestStackOutputs ensures we can export variables from a stack and have them get recorded as outputs.
func TestStackOutputsNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
func TestStackOutputsPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
func TestStackOutputsDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "dotnet"),
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
// TestStackOutputsJSON ensures the CLI properly formats stack outputs as JSON when requested.
func TestStackOutputsJSON(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory(filepath.Join("stack_outputs", "nodejs"))
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "stack-outs")
e.RunCommand("pulumi", "up", "--non-interactive", "--skip-preview")
stdout, _ := e.RunCommand("pulumi", "stack", "output", "--json")
assert.Equal(t, `{
"foo": 42,
"xyz": "ABC"
}
`, stdout)
}
// TestStackOutputsDisplayed ensures that outputs are printed at the end of an update
func TestStackOutputsDisplayed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
// ensure we get the outputs info both for the normal update, and for the no-change update.
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n + 1 created")
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n 1 unchanged")
},
})
}
// TestStackOutputsSuppressed ensures that outputs whose values are intentionally suppressed don't show.
func TestStackOutputsSuppressed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
UpdateCommandlineFlags: []string{"--suppress-outputs"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n")
assert.NotContains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n")
},
})
}
// TestStackParenting tests out that stacks and components are parented correctly.
func TestStackParenting(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains resources parented correctly. This should look like this:
//
// A F
// / \ \
// B C G
// / \
// D E
//
// with the caveat, of course, that A and F will share a common parent, the implicit stack.
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 9, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.Type)
assert.Equal(t, "", string(stackRes.Parent))
urns := make(map[string]resource.URN)
for _, res := range stackInfo.Deployment.Resources[1:] {
assert.NotNil(t, res)
urns[string(res.URN.Name())] = res.URN
switch res.URN.Name() {
case "a", "f":
assert.NotEqual(t, "", res.Parent)
assert.Equal(t, stackRes.URN, res.Parent)
case "b", "c":
assert.Equal(t, urns["a"], res.Parent)
case "d", "e":
assert.Equal(t, urns["c"], res.Parent)
case "g":
assert.Equal(t, urns["f"], res.Parent)
case "default":
// Default providers are not parented.
assert.Equal(t, "", string(res.Parent))
default:
t.Fatalf("unexpected name %s", res.URN.Name())
}
}
}
},
})
}
func TestStackBadParenting(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_bad_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExpectFailure: true,
})
}
// TestStackDependencyGraph tests that the dependency graph of a stack is saved
// in the checkpoint file.
func TestStackDependencyGraph(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_dependencies",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
assert.True(t, len(latest.Resources) >= 2)
sawFirst := false
sawSecond := false
for _, res := range latest.Resources {
urn := string(res.URN)
if strings.Contains(urn, "dynamic:Resource::first") {
// The first resource doesn't depend on anything.
assert.Equal(t, 0, len(res.Dependencies))
sawFirst = true
} else if strings.Contains(urn, "dynamic:Resource::second") {
// The second resource uses an Output property of the first resource, so it
// depends directly on first.
assert.Equal(t, 1, len(res.Dependencies))
assert.True(t, strings.Contains(string(res.Dependencies[0]), "dynamic:Resource::first"))
sawSecond = true
}
}
assert.True(t, sawFirst && sawSecond)
},
})
}
// TestConfigSave ensures that config commands in the Pulumi CLI work as expected.
func TestConfigSave(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Initialize an empty stack.
path := filepath.Join(e.RootPath, "Pulumi.yaml")
err := (&workspace.Project{
Name: "testing-config",
Runtime: workspace.NewProjectRuntimeInfo("nodejs", nil),
}).Save(path)
assert.NoError(t, err)
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "testing-2")
e.RunCommand("pulumi", "stack", "init", "testing-1")
// Now configure and save a few different things:
e.RunCommand("pulumi", "config", "set", "configA", "value1")
e.RunCommand("pulumi", "config", "set", "configB", "value2", "--stack", "testing-2")
e.RunCommand("pulumi", "stack", "select", "testing-2")
e.RunCommand("pulumi", "config", "set", "configD", "value4")
e.RunCommand("pulumi", "config", "set", "configC", "value3", "--stack", "testing-1")
// Now read back the config using the CLI:
{
stdout, _ := e.RunCommand("pulumi", "config", "get", "configB")
assert.Equal(t, "value2\n", stdout)
}
{
// the config is in a different stack, so this should error.
stdout, stderr := e.RunCommandExpectError("pulumi", "config", "get", "configA")
assert.Equal(t, "", stdout)
assert.NotEqual(t, "", stderr)
}
{
// but selecting the stack should let you see it
stdout, _ := e.RunCommand("pulumi", "config", "get", "configA", "--stack", "testing-1")
assert.Equal(t, "value1\n", stdout)
}
// Finally, check that the stack file contains what we expected.
validate := func(k string, v string, cfg config.Map) {
key, err := config.ParseKey("testing-config:config:" + k)
assert.NoError(t, err)
d, ok := cfg[key]
assert.True(t, ok, "config key %v should be set", k)
dv, err := d.Value(nil)
assert.NoError(t, err)
assert.Equal(t, v, dv)
}
testStack1, err := workspace.LoadProjectStack(filepath.Join(e.CWD, "Pulumi.testing-1.yaml"))
assert.NoError(t, err)
testStack2, err := workspace.LoadProjectStack(filepath.Join(e.CWD, "Pulumi.testing-2.yaml"))
assert.NoError(t, err)
assert.Equal(t, 2, len(testStack1.Config))
assert.Equal(t, 2, len(testStack2.Config))
validate("configA", "value1", testStack1.Config)
validate("configC", "value3", testStack1.Config)
validate("configB", "value2", testStack2.Config)
validate("configD", "value4", testStack2.Config)
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// TestConfigPaths ensures that config commands with paths work as expected.
func TestConfigPaths(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Initialize an empty stack.
path := filepath.Join(e.RootPath, "Pulumi.yaml")
err := (&workspace.Project{
Name: "testing-config",
Runtime: workspace.NewProjectRuntimeInfo("nodejs", nil),
}).Save(path)
assert.NoError(t, err)
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "testing")
namespaces := []string{"", "my:"}
tests := []struct {
Key string
Value string
Secret bool
Path bool
TopLevelKey string
TopLevelExpectedValue string
}{
{
Key: "aConfigValue",
Value: "this value is a value",
TopLevelKey: "aConfigValue",
TopLevelExpectedValue: "this value is a value",
},
{
Key: "anotherConfigValue",
Value: "this value is another value",
TopLevelKey: "anotherConfigValue",
TopLevelExpectedValue: "this value is another value",
},
{
Key: "bEncryptedSecret",
Value: "this super secret is encrypted",
Secret: true,
TopLevelKey: "bEncryptedSecret",
TopLevelExpectedValue: "this super secret is encrypted",
},
{
Key: "anotherEncryptedSecret",
Value: "another encrypted secret",
Secret: true,
TopLevelKey: "anotherEncryptedSecret",
TopLevelExpectedValue: "another encrypted secret",
},
{
Key: "[]",
Value: "square brackets value",
TopLevelKey: "[]",
TopLevelExpectedValue: "square brackets value",
},
{
Key: "x.y",
Value: "x.y value",
TopLevelKey: "x.y",
TopLevelExpectedValue: "x.y value",
},
{
Key: "0",
Value: "0 value",
Path: true,
TopLevelKey: "0",
TopLevelExpectedValue: "0 value",
},
{
Key: "true",
Value: "value",
Path: true,
TopLevelKey: "true",
TopLevelExpectedValue: "value",
},
{
Key: `["test.Key"]`,
Value: "test key value",
Path: true,
TopLevelKey: "test.Key",
TopLevelExpectedValue: "test key value",
},
{
Key: `nested["test.Key"]`,
Value: "nested test key value",
Path: true,
TopLevelKey: "nested",
TopLevelExpectedValue: `{"test.Key":"nested test key value"}`,
},
{
Key: "outer.inner",
Value: "value",
Path: true,
TopLevelKey: "outer",
TopLevelExpectedValue: `{"inner":"value"}`,
},
{
Key: "names[0]",
Value: "a",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a"]`,
},
{
Key: "names[1]",
Value: "b",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b"]`,
},
{
Key: "names[2]",
Value: "c",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b","c"]`,
},
{
Key: "names[3]",
Value: "super secret name",
Path: true,
Secret: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b","c","super secret name"]`,
},
{
Key: "servers[0].port",
Value: "80",
Path: true,
TopLevelKey: "servers",
TopLevelExpectedValue: `[{"port":80}]`,
},
{
Key: "servers[0].host",
Value: "example",
Path: true,
TopLevelKey: "servers",
TopLevelExpectedValue: `[{"host":"example","port":80}]`,
},
{
Key: "a.b[0].c",
Value: "true",
Path: true,
TopLevelKey: "a",
TopLevelExpectedValue: `{"b":[{"c":true}]}`,
},
{
Key: "a.b[1].c",
Value: "false",
Path: true,
TopLevelKey: "a",
TopLevelExpectedValue: `{"b":[{"c":true},{"c":false}]}`,
},
{
Key: "tokens[0]",
Value: "shh",
Path: true,
Secret: true,
TopLevelKey: "tokens",
TopLevelExpectedValue: `["shh"]`,
},
{
Key: "foo.bar",
Value: "don't tell",
Path: true,
Secret: true,
TopLevelKey: "foo",
TopLevelExpectedValue: `{"bar":"don't tell"}`,
},
{
Key: "semiInner.a.b.c.d",
Value: "1",
Path: true,
TopLevelKey: "semiInner",
TopLevelExpectedValue: `{"a":{"b":{"c":{"d":1}}}}`,
},
{
Key: "wayInner.a.b.c.d.e.f.g.h.i.j.k",
Value: "false",
Path: true,
TopLevelKey: "wayInner",
TopLevelExpectedValue: `{"a":{"b":{"c":{"d":{"e":{"f":{"g":{"h":{"i":{"j":{"k":false}}}}}}}}}}}`,
},
// Overwriting a top-level string value is allowed.
{
Key: "aConfigValue.inner",
Value: "new value",
Path: true,
TopLevelKey: "aConfigValue",
TopLevelExpectedValue: `{"inner":"new value"}`,
},
{
Key: "anotherConfigValue[0]",
Value: "new value",
Path: true,
TopLevelKey: "anotherConfigValue",
TopLevelExpectedValue: `["new value"]`,
},
{
Key: "bEncryptedSecret.inner",
Value: "new value",
Path: true,
TopLevelKey: "bEncryptedSecret",
TopLevelExpectedValue: `{"inner":"new value"}`,
},
{
Key: "anotherEncryptedSecret[0]",
Value: "new value",
Path: true,
TopLevelKey: "anotherEncryptedSecret",
TopLevelExpectedValue: `["new value"]`,
},
}
validateConfigGet := func(key string, value string, path bool) {
args := []string{"config", "get", key}
if path {
args = append(args, "--path")
}
stdout, stderr := e.RunCommand("pulumi", args...)
assert.Equal(t, fmt.Sprintf("%s\n", value), stdout)
assert.Equal(t, "", stderr)
}
for _, ns := range namespaces {
for _, test := range tests {
key := fmt.Sprintf("%s%s", ns, test.Key)
topLevelKey := fmt.Sprintf("%s%s", ns, test.TopLevelKey)
// Set the value.
args := []string{"config", "set"}
if test.Secret {
args = append(args, "--secret")
}
if test.Path {
args = append(args, "--path")
}
args = append(args, key, test.Value)
stdout, stderr := e.RunCommand("pulumi", args...)
assert.Equal(t, "", stdout)
assert.Equal(t, "", stderr)
// Get the value and validate it.
validateConfigGet(key, test.Value, test.Path)
// Get the top-level value and validate it.
validateConfigGet(topLevelKey, test.TopLevelExpectedValue, false /*path*/)
}
}
badKeys := []string{
// Syntax errors.
"root[",
`root["nested]`,
"root.array[abc]",
"root.[1]",
// First path segment must be a non-empty string.
`[""]`,
"[0]",
// Index out of range.
"names[-1]",
"names[5]",
// A "secure" key that is a map with a single string value is reserved by the system.
"key.secure",
"super.nested.map.secure",
// Type mismatch.
"outer[0]",
"names.nested",
"outer.inner.nested",
"outer.inner[0]",
}
for _, ns := range namespaces {
for _, badKey := range badKeys {
key := fmt.Sprintf("%s%s", ns, badKey)
stdout, stderr := e.RunCommandExpectError("pulumi", "config", "set", "--path", key, "value")
assert.Equal(t, "", stdout)
assert.NotEqual(t, "", stderr)
}
}
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// Tests basic configuration from the perspective of a Pulumi program.
func TestConfigBasicNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
func TestConfigCaptureNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_capture_e2e", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"value": "it works",
},
})
}
func TestInvalidVersionInPackageJson(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("invalid_package_json"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{},
})
}
// Tests basic configuration from the perspective of a Pulumi program.
func TestConfigBasicPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "python"),
Dependencies: []string{
path.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a Pythonic value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super Pythonic secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests basic configuration from the perspective of a Pulumi Go program.
func TestConfigBasicGo(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "go"),
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests basic configuration from the perspective of a Pulumi .NET program.
func TestConfigBasicDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "dotnet"),
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests an explicit provider instance.
func TestExplicitProvider(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "explicit_provider",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
// Expect one stack resource, two provider resources, and two custom resources.
assert.True(t, len(latest.Resources) == 5)
var defaultProvider *apitype.ResourceV3
var explicitProvider *apitype.ResourceV3
for _, res := range latest.Resources {
urn := res.URN
switch urn.Name() {
case "default":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, defaultProvider)
prov := res
defaultProvider = &prov
case "p":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, explicitProvider)
prov := res
explicitProvider = &prov
case "a":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, defaultProvider)
defaultRef, err := providers.NewReference(defaultProvider.URN, defaultProvider.ID)
assert.NoError(t, err)
assert.Equal(t, defaultRef.String(), prov.String())
case "b":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, explicitProvider)
explicitRef, err := providers.NewReference(explicitProvider.URN, explicitProvider.ID)
assert.NoError(t, err)
assert.Equal(t, explicitRef.String(), prov.String())
}
}
assert.NotNil(t, defaultProvider)
assert.NotNil(t, explicitProvider)
},
})
}
// Tests that reads of unknown IDs do not fail.
func TestGetCreated(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "get_created",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Tests that stack references work in Node.
func TestStackReferenceNodeJS(t *testing.T) {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: "stack_reference",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
func TestStackReferencePython(t *testing.T) {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
}
integration.ProgramTest(t, opts)
}
func TestStackReferenceDotNet(t *testing.T) {
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "dotnet"),
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
}
integration.ProgramTest(t, opts)
}
// Tests that we issue an error if we fail to locate the Python command when running
// a Python example.
func TestPython3NotInstalled(t *testing.T) {
stderr := &bytes.Buffer{}
badPython := "python3000"
expectedError := fmt.Sprintf(
"error: Failed to locate '%s' on your PATH. Have you installed Python 3.6 or greater?",
badPython)
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: path.Join("empty", "python"),
Dependencies: []string{
path.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Env: []string{
// Note: we use PULUMI_PYTHON_CMD to override the default behavior of searching
// for Python 3, since anyone running tests surely already has Python 3 installed on their
// machine. The code paths are functionally the same.
fmt.Sprintf("PULUMI_PYTHON_CMD=%s", badPython),
},
ExpectFailure: true,
Stderr: stderr,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stderr.String()
assert.Contains(t, output, expectedError)
},
})
}
// TestProviderSecretConfig tests that a first-class provider can be created when it has secrets as part of its config.
func TestProviderSecretConfig(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "provider_secret_config",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Tests dynamic provider in Python.
func TestDynamicPython(t *testing.T) {
var randomVal string
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("dynamic", "python"),
Dependencies: []string{
path.Join("..", "..", "sdk", "python", "env", "src"),
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
randomVal = stack.Outputs["random_val"].(string)
},
EditDirs: []integration.EditDir{{
Dir: "step1",
Additive: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assert.Equal(t, randomVal, stack.Outputs["random_val"].(string))
},
}},
})
}
func TestResourceWithSecretSerialization(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "secret_outputs",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// The program exports two resources, one named `withSecret` whose prefix property should be secret
// and one named `withoutSecret` which should not be. We serialize both of these as POJO objects, so
// they appear as maps in the output.
withSecretProps, ok := stackInfo.Outputs["withSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
withoutSecretProps, ok := stackInfo.Outputs["withoutSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
// The secret prop should have been serialized as a secret
secretPropValue, ok := withSecretProps["prefix"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
// And here, the prop was not marked as secret, so it should just be a string value
_, isString := withoutSecretProps["prefix"].(string)
assert.Truef(t, isString, "non-secret output was not a string")
},
})
}
func TestStackReferenceSecrets(t *testing.T) {
owner := os.Getenv("PULUMI_TEST_OWNER")
if owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
d := "stack_reference_secrets"
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: path.Join(d, "step1"),
Dependencies: []string{"@pulumi/pulumi"},
Config: map[string]string{
"org": owner,
},
Quick: true,
EditDirs: []integration.EditDir{
{
Dir: path.Join(d, "step2"),
Additive: true,
ExpectNoChanges: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
_, isString := stackInfo.Outputs["refNormal"].(string)
assert.Truef(t, isString, "referenced non-secret output was not a string")
secretPropValue, ok := stackInfo.Outputs["refSecret"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
},
},
},
})
}
func TestCloudSecretProvider(t *testing.T) {
kmsKeyAlias := os.Getenv("PULUMI_TEST_KMS_KEY_ALIAS")
if kmsKeyAlias == "" {
t.Skipf("Skipping: PULUMI_TEST_KMS_KEY_ALIAS is not set")
}
testOptions := integration.ProgramTestOptions{
Dir: "cloud_secrets_provider",
Dependencies: []string{"@pulumi/pulumi"},
SecretsProvider: fmt.Sprintf("awskms://alias/%s", kmsKeyAlias),
Secrets: map[string]string{
"mysecret": "THISISASECRET",
},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
secretsProvider := stackInfo.Deployment.SecretsProviders
assert.NotNil(t, secretsProvider)
assert.Equal(t, secretsProvider.Type, "cloud")
_, err := cloud.NewCloudSecretsManagerFromState(secretsProvider.State)
assert.NoError(t, err)
out, ok := stackInfo.Outputs["out"].(map[string]interface{})
assert.True(t, ok)
_, ok = out["ciphertext"]
assert.True(t, ok)
},
}
localTestOptions := testOptions.With(integration.ProgramTestOptions{
CloudURL: "file://~",
})
// Run with default Pulumi service backend
t.Run("service", func(t *testing.T) { integration.ProgramTest(t, &testOptions) })
// Also run with local backend
t.Run("local", func(t *testing.T) { integration.ProgramTest(t, &localTestOptions) })
}
func TestPartialValuesNode(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("partial_values", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
AllowEmptyPreviewChanges: true,
})
}
func TestPartialValuesPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("partial_values", "python"),
Dependencies: []string{
path.Join("..", "..", "sdk", "python", "env", "src"),
},
AllowEmptyPreviewChanges: true,
})
}
|
[
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_KMS_KEY_ALIAS\""
] |
[] |
[
"PULUMI_ACCESS_TOKEN",
"PULUMI_TEST_OWNER",
"PULUMI_TEST_KMS_KEY_ALIAS"
] |
[]
|
["PULUMI_ACCESS_TOKEN", "PULUMI_TEST_OWNER", "PULUMI_TEST_KMS_KEY_ALIAS"]
|
go
| 3 | 0 | |
panqec/config.py
|
"""
Settings from environmental variables and config files.
:Author:
Eric Huang
"""
import os
from dotenv import load_dotenv
from .codes import (
Toric3DCode, Toric2DCode,
RotatedPlanar3DCode, XCubeCode,
RotatedToric3DCode, RhombicCode
)
from .decoders import (
Toric3DMatchingDecoder, SweepMatchDecoder,
RotatedSweepMatchDecoder, RotatedInfiniteZBiasDecoder
)
from .decoders.bposd.bposd_decoder import BeliefPropagationOSDDecoder
from .decoders.bposd.mbp_decoder import MemoryBeliefPropagationDecoder
from .decoders.sweepmatch._toric_2d_match_decoder import Toric2DMatchingDecoder
from .error_models import (
DeformedXZZXErrorModel, DeformedXYErrorModel,
DeformedRhombicErrorModel, DeformedRandomErrorModel
)
from .decoders import (
DeformedSweepMatchDecoder, FoliatedMatchingDecoder,
DeformedRotatedSweepMatchDecoder
)
from .error_models import PauliErrorModel
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Load the .env file into environmental variables.
if os.getenv('PANQEC_DIR') is None:
load_dotenv()
PANQEC_DARK_THEME = False
if os.getenv('PANQEC_DARK_THEME'):
PANQEC_DARK_THEME = bool(os.getenv('PANQEC_DARK_THEME'))
# Fallback is to use temp dir inside repo if PANQEC_DIR is not available.
PANQEC_DIR = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'temp'
)
# Load the output directory from environmental variables.
if os.getenv('PANQEC_DIR') is not None:
PANQEC_DIR = os.path.abspath(str(os.getenv('PANQEC_DIR')))
if not os.path.isdir(PANQEC_DIR):
raise FileNotFoundError(
f'PANQEC_DIR={PANQEC_DIR} is not a valid directory. '
'Check .env configuration.'
)
# Register your models here.
CODES = {
'Toric2DCode': Toric2DCode,
'Toric3DCode': Toric3DCode,
'RhombicCode': RhombicCode,
'RotatedPlanar3DCode': RotatedPlanar3DCode,
'RotatedToric3DCode': RotatedToric3DCode,
'XCubeCode': XCubeCode
}
ERROR_MODELS = {
'PauliErrorModel': PauliErrorModel,
'DeformedXZZXErrorModel': DeformedXZZXErrorModel,
'DeformedRandomErrorModel': DeformedRandomErrorModel,
'DeformedXYErrorModel': DeformedXYErrorModel,
'DeformedRhombicErrorModel': DeformedRhombicErrorModel,
}
DECODERS = {
'Toric2DMatchingDecoder': Toric2DMatchingDecoder,
'Toric3DMatchingDecoder': Toric3DMatchingDecoder,
'SweepMatchDecoder': SweepMatchDecoder,
'RotatedSweepMatchDecoder': RotatedSweepMatchDecoder,
'DeformedSweepMatchDecoder': DeformedSweepMatchDecoder,
'FoliatedMatchingDecoder': FoliatedMatchingDecoder,
'DeformedRotatedSweepMatchDecoder': DeformedRotatedSweepMatchDecoder,
'BeliefPropagationOSDDecoder': BeliefPropagationOSDDecoder,
'MemoryBeliefPropagationDecoder': MemoryBeliefPropagationDecoder,
'RotatedInfiniteZBiasDecoder': RotatedInfiniteZBiasDecoder
}
# Slurm automation config.
SLURM_DIR = os.path.join(os.path.dirname(BASE_DIR), 'slurm')
if os.getenv('SLURM_DIR') is not None:
SLURM_DIR = os.path.abspath(str(os.getenv('SLURM_DIR')))
SBATCH_TEMPLATE = os.path.join(
os.path.dirname(BASE_DIR), 'scripts', 'template.sbatch'
)
NIST_TEMPLATE = os.path.join(
os.path.dirname(BASE_DIR), 'scripts', 'nist.sbatch'
)
# Slurm username for reporting status.
SLURM_USERNAME = None
if os.getenv('USER') is not None:
SLURM_USERNAME = os.getenv('USER')
elif os.getenv('USERNAME') is not None:
SLURM_USERNAME = os.getenv('USERNAME')
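# Registration helpers so user-defined codes, error models and decoders can be added
# to the lookup tables above at runtime.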
def register_code(code_class):
label = code_class.__class__.__name__
CODES[label] = code_class
def register_error_model(error_model_class):
label = error_model_class.__class__.__name__
ERROR_MODELS[label] = error_model_class
def register_decoder(decoder_class):
label = decoder_class.__class__.__name__
DECODERS[label] = decoder_class
|
[] |
[] |
[
"USERNAME",
"SLURM_DIR",
"PANQEC_DARK_THEME",
"PANQEC_DIR",
"USER"
] |
[]
|
["USERNAME", "SLURM_DIR", "PANQEC_DARK_THEME", "PANQEC_DIR", "USER"]
|
python
| 5 | 0 | |
libcst/codemod/commands/tests/test_remove_unused_imports.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pyre-strict
from libcst.codemod import CodemodTest
from libcst.codemod.commands.remove_unused_imports import RemoveUnusedImportsCommand
class RemoveUnusedImportsCommandTest(CodemodTest):
TRANSFORM = RemoveUnusedImportsCommand
def test_simple_case(self) -> None:
before = "import a, b\na()"
after = "import a\na()"
self.assertCodemod(before, after)
def test_double_import(self) -> None:
before = "import a\nimport a\na()"
self.assertCodemod(before, before)
def test_conditional_import(self) -> None:
before = """
if True:
import a
else:
import b as a
a()
"""
self.assertCodemod(before, before)
def test_unused_in_conditional(self) -> None:
before = """
if False:
import a
"""
after = """
if False:
pass
"""
self.assertCodemod(before, after)
def test_type_annotations(self) -> None:
before = """
import a
x: a = 1
"""
self.assertCodemod(before, before)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
internal/socket/socket_test.go
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
package socket_test
import (
"net"
"runtime"
"syscall"
"testing"
"github.com/sunnogo/net/internal/nettest"
"github.com/sunnogo/net/internal/socket"
)
func TestSocket(t *testing.T) {
t.Run("Option", func(t *testing.T) {
testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4})
})
}
func testSocketOption(t *testing.T, so *socket.Option) {
c, err := nettest.NewLocalPacketListener("udp")
if err != nil {
t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err)
}
defer c.Close()
cc, err := socket.NewConn(c.(net.Conn))
if err != nil {
t.Fatal(err)
}
const N = 2048
if err := so.SetInt(cc, N); err != nil {
t.Fatal(err)
}
n, err := so.GetInt(cc)
if err != nil {
t.Fatal(err)
}
if n < N {
t.Fatalf("got %d; want greater than or equal to %d", n, N)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
wootric.py
|
import os, requests, json, dateparser, time, yaml, boto3, slack, random
from typing import List
from pathlib import Path
from pprint import pprint
from traceback import format_exc
from missing_data import get_missing_responses, get_missing_users
BATCH_SIZE = 500
wootric_session = requests.Session()
ACCESS_TOKEN = ''
CLIENT_ID = os.getenv('WOOTRIC_CLIENT_ID')
CLIENT_SECRET = os.getenv('WOOTRIC_CLIENT_SECRET')
BASE_URL = 'https://api.wootric.com'
stitch_session = requests.Session()
STITCH_CLIENT_ID = os.getenv('STITCH_CLIENT_ID')
STITCH_TOKEN = os.getenv('STITCH_TOKEN')
STITCH_BASE_URL = 'https://api.stitchdata.com'
stitch_session.headers = {'Authorization': f'Bearer {STITCH_TOKEN}', 'Content-Type': 'application/json'}
BUCKET = os.getenv('AWS_BUCKET')
os.environ['AWS_ACCESS_KEY_ID'] = os.getenv('AWS_ACCESS_KEY_ID_', os.getenv('AWS_ACCESS_KEY_ID')) # lambda doesn't allow this reserved var
os.environ['AWS_SECRET_ACCESS_KEY'] = os.getenv('AWS_SECRET_ACCESS_KEY_', os.getenv('AWS_SECRET_ACCESS_KEY')) # lambda doesn't allow this reserved var
os.environ['AWS_SESSION_TOKEN'] = '' # lambda provides this reserved var during execution, need to set blank
STATE_KEY = 'wootric.state.json'
s3 = boto3.resource("s3").Bucket(BUCKET)
slack_client = slack.WebhookClient(url=os.getenv('SLACK_WH_TOKEN'))
# init state
state = dict(
end_users=1420070400,
responses=1420070400,
declines=1420070400,
)
def get_access_token():
global ACCESS_TOKEN
url = f'{BASE_URL}/oauth/token'
payload = dict(
grant_type='client_credentials',
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
)
resp = wootric_session.post(url, payload)
data : dict = resp.json()
ACCESS_TOKEN = data.get('access_token')
if not ACCESS_TOKEN:
raise Exception('did not find access_token')
wootric_session.headers = dict(Authorization=f'Bearer {ACCESS_TOKEN}')
def wootric_response(user_id: str, response_id: str):
url = f'{BASE_URL}/v1/end_users/{user_id}/responses/{response_id}'
print(url)
resp = wootric_session.get(url)
return resp.json()
def wootric_user(user_id: str):
url = f'{BASE_URL}/v1/end_users/{user_id}'
print(url)
resp = wootric_session.get(url)
return resp.json()
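# Fetch all records of the given Wootric object type that changed since the saved state
# timestamp (minus one second), paginating up to a randomized page limit.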
def wootric_request(object_name: str, date_key: str, **params):
url = f'{BASE_URL}/v1/{object_name}'
req = requests.models.PreparedRequest()
date_val = state[object_name]
# Use a random page limit because some records seem to get missed; an attempt to randomize sort anchors.
limit = random.randint(5,29)
params[f"{date_key.replace('_at', '')}[gte]"] = date_val - 1
page = 0
all = []
while True:
page += 1
if page > limit: break
params['page'] = page
req.prepare_url(url, params)
print(req.url)
try:
resp = wootric_session.get(req.url)
if resp is None:
raise Exception(f'Response is None for: {req.url}')
elif not resp.ok:
raise Exception(f'\n\nHTTP Status Code {resp.status_code} for {req.url}: \n{resp.text}')
except Exception:
raise Exception(f'Error for {req.url}.\n\n{format_exc()}')
data = resp.json()
if len(data) == 0:
break
all += data
return all
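# Push a batch of records to the Stitch Import API as upsert messages, coercing any
# date-time fields to ISO format according to the supplied schema.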
def send_batch(object_name: str, schema: dict, keys: List[str], records: List[dict]):
is_datetime = lambda k: schema['properties'].get(k, {}).get('format') == 'date-time'
messages = []
for record in records:
rec = dict(
action='upsert',
sequence=int(time.time_ns() / 1000),
data={},
)
for k, v in record.items():
k = k.replace('.', '_')
v = (dateparser.parse(str(v))).isoformat() if v and is_datetime(k) else v
rec['data'][k] = v
messages.append(rec)
payload = dict(
table_name=object_name,
key_names=keys,
schema=schema,
messages=messages,
)
# with open('payload.json', 'w') as file:
# json.dump(payload, file)
url = f'{STITCH_BASE_URL}/v2/import/batch'
resp = stitch_session.post(url, json.dumps(payload))
data : dict = resp.json()
print(data)
status = data.get('status')
if status != 'OK':
pprint(dict(status_code=resp.status_code))
resp.raise_for_status()
else:
print(f'pushed {len(records)} records to "{object_name}"')
def load_state():
global state
state = json.loads(s3.Object(key=STATE_KEY).get()["Body"].read().decode('utf-8'))
# re-run for past 3 days, an attempt to fill in any holes
for k in state:
state[k] = state.get(k, 1420070400) - 3*24*60*60
def save_state():
global state
s3.Object(key=STATE_KEY).put(Body=json.dumps(state))
print(json.dumps(state))
def run(event, context):
global state
try:
# load wootric access token
get_access_token()
# load state
load_state()
except Exception as E:
slack_client.send(text=f"Error occurred for Wootric-Stitch Integration:\n{format_exc()}")
raise E
config_file = Path('config.yaml')
with config_file.open() as file:
object_configs = yaml.load(file, Loader=yaml.SafeLoader)
errors = []
for object_name, object_config in object_configs.items():
records : List[dict] = []
try:
print(f'Loading {object_name}')
while True:
date_key = object_config['date_key']
params = object_config['params']
schema = object_config['schema']
keys = object_config['keys']
data : List[dict] = wootric_request(object_name, date_key, **params)
if len(data) == 0:
if len(records) == 0:
break
else:
records += data
send_batch(object_name, schema, keys, records)
record = records[-1]
if date_key not in record:
raise Exception(f'no datekey: {date_key}')
records = []
date_val = dateparser.parse(record[date_key])
ts_val = int(date_val.timestamp())
if date_val and ts_val > state[object_name]:
state[object_name] = ts_val
save_state()
else:
break
except Exception as E:
errors.append(format_exc())
finally:
save_state()
# Missing users START
# Some users seem to be missing even when using gte, so fetch their IDs from the database.
try:
users = []
for row in get_missing_users():
users += [wootric_user(row.end_user_id)]
response_config = object_configs.get('end_users')
send_batch('end_users', response_config['schema'], response_config['keys'], users)
except Exception as E:
errors.append(format_exc())
# Missing users END
# Missing responses START
# Some responses seem to be missing even when using gte, so fetch their IDs from the database.
try:
responses = []
for row in get_missing_responses():
responses += [wootric_response(row.user_id, row.last_response__id)]
response_config = object_configs.get('responses')
send_batch('responses', response_config['schema'], response_config['keys'], responses)
except Exception as E:
errors.append(format_exc())
# Missing responses END
if len(errors) > 0:
e = '\n\n'.join(errors)
slack_client.send(text=f'Error occurred for Wootric-Stitch Integration:\n{e}')
raise Exception(e)
# run(None, None)
|
[] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_ACCESS_KEY_ID_",
"AWS_SECRET_ACCESS_KEY_",
"AWS_SECRET_ACCESS_KEY",
"AWS_BUCKET",
"STITCH_TOKEN",
"SLACK_WH_TOKEN",
"AWS_ACCESS_KEY_ID",
"WOOTRIC_CLIENT_ID",
"STITCH_CLIENT_ID",
"WOOTRIC_CLIENT_SECRET"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_ACCESS_KEY_ID_", "AWS_SECRET_ACCESS_KEY_", "AWS_SECRET_ACCESS_KEY", "AWS_BUCKET", "STITCH_TOKEN", "SLACK_WH_TOKEN", "AWS_ACCESS_KEY_ID", "WOOTRIC_CLIENT_ID", "STITCH_CLIENT_ID", "WOOTRIC_CLIENT_SECRET"]
|
python
| 11 | 0 | |
build/lib/sightseer/zoo.py
|
import os
import wget
import struct
import shutil
import logging
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, UpSampling2D, concatenate
from tensorflow.keras.models import Model, load_model
from .blocks import ConvBlock, BoundingBox, SightLoader
# disabling warnings and logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.autograph.set_verbosity(tf.compat.v1.logging.ERROR)
logging.disable(logging.WARNING)
class YOLOv3Client(object):
def __init__(self, nms_threshold=0.45, obj_threshold=0.5, net_h=416, net_w=416, anchors=[[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119], [10, 13, 16, 30, 33, 23]]):
self.nms_threshold = nms_threshold
self.obj_threshold = obj_threshold
self.net_h, self.net_w = net_h, net_w
self.anchors = anchors
self.yolo_model = None # initialised after weights are loaded into model
self.weights_url = "https://pjreddie.com/media/files/yolov3.weights"
self.all_labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck",
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana",
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
def download_weights(self):
"""
Downloads the weights from online and saves them locally
"""
if os.path.exists("./bin/yolov3.weights"):
print ("Weights already exist. Proceeding to load YOLOv3Client...")
else:
print ("Downloading weights. This may may take a moment...")
wget.download(self.weights_url, os.getcwd() + "/yolov3.weights")
os.mkdir("./bin", 0o755) # configuring admin rights
shutil.move("./yolov3.weights", "./bin/yolov3.weights")
print ("\n\nWeights downloaded successfully!")
def load_architecture(self):
"""
Returns tf.keras.models.Model instance
"""
inp_image = Input(shape=[None, None, 3])
x = ConvBlock.get_conv_block(inp_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
x = ConvBlock.get_conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
x = ConvBlock.get_conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
x = ConvBlock.get_conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
for i in range(7):
x = ConvBlock.get_conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
x = ConvBlock.get_conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
for i in range(7):
x = ConvBlock.get_conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
x = ConvBlock.get_conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
for i in range(3):
x = ConvBlock.get_conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
x = ConvBlock.get_conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
yolo_82 = ConvBlock.get_conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
x = ConvBlock.get_conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
x = ConvBlock.get_conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
yolo_94 = ConvBlock.get_conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
x = ConvBlock.get_conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
yolo_106 = ConvBlock.get_conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(inp_image, [yolo_82, yolo_94, yolo_106])
return model
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def preprocess(self, image):
"""
Resizes image to appropriate dimensions for YOLOv3
"""
new_h, new_w = image.shape[:2]
if (float(self.net_w)/new_w) < (float(self.net_h)/new_h):
new_h = (new_h * self.net_w)//new_w
new_w = self.net_w
else:
new_w = (new_w * self.net_h)//new_h
new_h = self.net_h
# resize the image to the new size
resized = cv2.resize(image[:, :, ::-1]/255., (int(new_w), int(new_h)))
# embed the image into the standard letter box
new_img = np.ones((self.net_h, self.net_w, 3)) * 0.5
new_img[int((self.net_h-new_h)//2):int((self.net_h+new_h)//2), int((self.net_w-new_w)//2):int((self.net_w+new_w)//2), :] = resized
new_img = np.expand_dims(new_img, 0)
return new_img
def interval_overlap(self, int_a, int_b):
x1, x2 = int_a
x3, x4 = int_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2, x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2, x4) - x3
def bbox_iou(self, box1, box2):
"""
Finds IOU between all bounding boxes before non maximum suppression process
"""
int_w = self.interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
int_h = self.interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = int_w * int_h
w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin
w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def non_maximum_suppression(self, boxes):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if self.bbox_iou(boxes[index_i], boxes[index_j]) >= self.nms_threshold:
boxes[index_j].classes[c] = 0
return boxes
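# Decode the raw YOLOv3 output of one scale into BoundingBox objects, applying sigmoid
# activations and dropping predictions below the objectness threshold.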
def decode_preds(self, preds, anchors):
gridh, gridw = preds.shape[:2]
nb_box = 3
preds = preds.reshape([gridh, gridw, nb_box, -1])
nb_class = preds.shape[-1] - 5
boxes = []
preds[..., :2] = self.sigmoid(preds[..., :2])
preds[..., 4:] = self.sigmoid(preds[..., 4:])
preds[..., 5:] = preds[..., 4][..., np.newaxis] * preds[..., 5:]
preds[..., 5:] *= preds[..., 5:] > self.obj_threshold
for i in range(gridh * gridw):
row = i / gridw
col = i % gridw
for b in range(nb_box):
objectness = preds[int(row)][int(col)][b][4]
if (objectness.all() <= self.obj_threshold): continue
x, y, w, h = preds[int(row)][int(col)][b][:4]
x = (col + x) / gridw
y = (row + y) / gridh
w = anchors[2 * b + 0] * np.exp(w) / self.net_w
h = anchors[2 * b + 1] * np.exp(h) / self.net_h
classes = preds[int(row)][col][b][5:]
box = BoundingBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def rectify_boxes(self, boxes, image_h, image_w):
if (float(self.net_w)/image_w) < (float(self.net_h)/image_h):
new_w = self.net_w
new_h = (image_h * self.net_w)/ image_w
else:
new_h = self.net_w
new_w = (image_w * self.net_h) / image_h
for i in range(len(boxes)):
x_offset, x_scale = (self.net_w - new_w)/2./self.net_w, float(new_w)/self.net_w
y_offset, y_scale = (self.net_h - new_h)/2./self.net_h, float(new_h)/self.net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
return boxes
def get_boxes(self, image, boxes, verbose=True, random_coloring=True):
final_boxes = []
for box in boxes:
final_label = ""
label = -1
for i in range(len(self.all_labels)):
if box.classes[i] > self.obj_threshold:
final_label += self.all_labels[i]
label = i
if verbose:
print ("{}: {:.3f}%".format(self.all_labels[i], box.classes[i]*100))
final_boxes.append([final_label,
box.classes[i] * 100,
{
'xmin': box.xmin,
'ymin': box.ymin,
'xmax': box.xmax,
'ymax': box.ymax
}
])
if label >= 0:
if random_coloring:
r, g, b = np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)
else:
r, g, b = 0, 255, 0
cv2.rectangle(image, (box.xmin, box.ymin), (box.xmax, box.ymax), (r, g, b), 2)
cv2.putText(image, '{} {:.3f}'.format(final_label, box.get_confidence()), (box.xmax, box.ymin - 13), cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * image.shape[0], (r, g, b), 2)
return final_boxes, image
def load_model(self, default_path="./bin/yolov3.weights", cache=True, verbose=True):
"""
Downloads weights and config, loads checkpoints into architecture
"""
if os.path.exists("./bin/yolov3.h5"):
print ("Weights already exist. Proceeding to load YOLOv3Client...")
self.yolo_model = load_model("./bin/yolov3.h5")
else:
self.download_weights() # downloading weights from online
loader = SightLoader(default_path)
self.yolo_model = self.load_architecture() # loading weights into model
loader.load_weights(self.yolo_model, verbose)
self.yolo_model.save("./bin/yolov3.h5") # saves .h5 weights file
os.remove("./bin/yolov3.weights") # removes original .weights file
def predict(self, original_image, return_img=False, verbose=True):
"""
Returns a list of BoundingBox metadata (class label, confidence score, coordinates)
and the edited image with bounding boxes and their corresponding text labels/confidence scores
"""
image_h, image_w = original_image.shape[:2]
if self.yolo_model is None:
raise ValueError("YOLOv3 weights need to be downloaded and configured into the model before use. You can use the `load_model()` method to do so.")
proc_image = self.preprocess(original_image)
preds = self.yolo_model.predict(proc_image)
boxes = []
for i in range(len(preds)):
boxes += self.decode_preds(preds[i][0], self.anchors[i])
boxes = self.rectify_boxes(boxes, image_h, image_w)
boxes = self.non_maximum_suppression(boxes)
box_list, box_image = self.get_boxes(original_image, boxes, verbose)
if return_img:
box_image = box_image.squeeze()
return box_list, box_image
else:
return box_list
def framewise_predict(self, frames, verbose=True):
final_preds = []
final_frames = []
for i in range(len(frames)):
print ("Frame {}".format(i))
cur_preds, edited_frame = self.predict(frames[i], return_img=True, verbose=False)
final_preds.append(cur_preds)
final_frames.append(edited_frame)
return final_preds, final_frames
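# Example usage of YOLOv3Client (an illustrative sketch; "street.jpg" is a hypothetical local image):
#   client = YOLOv3Client()
#   client.load_model()
#   detections = client.predict(cv2.imread("street.jpg"))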
class MaskRCNNClient(object):
def __init__(self):
pass
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
prediction_techniques/neural_networks.py
|
import argparse
from helper_funcs import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from TimeSeriesCrossValidation import splitTrain
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout, Activation, RepeatVector, TimeDistributed
import config_neural_network_models as cfg_nn_models
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
# ----------------------------------------------------------------------------------------------------
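# Each layer of the network description is a single-key dict mapping the layer type to its
# keyword arguments, e.g. (hypothetical values; the actual layers come from
# config_neural_network_models.py):
#   [{'LSTM': {'units': 25, 'return_sequences': True}}, {'Dropout': {'rate': 0.2}}]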
def build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):
model = Sequential()
for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):
# Recurrent Neural Network
if str(*d_layer) == 'SimpleRNN':
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(SimpleRNN(**d_layer['SimpleRNN'], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network)-1):
model.add(SimpleRNN(**d_layer['SimpleRNN'], units=n_days))
else:
model.add(SimpleRNN(**d_layer['SimpleRNN']))
# Long-Short Term-Memory
elif str(*d_layer) == 'LSTM':
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(LSTM(**d_layer['LSTM'], input_shape=(n_inputs, 1)))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network)-1):
model.add(LSTM(**d_layer['LSTM'], units=n_days))
else:
model.add(LSTM(**d_layer['LSTM']))
# Dense (Simple Neuron)
elif str(*d_layer) == 'Dense':
# Is this the input layer? If so, define input_shape
if idx_layer == 0:
model.add(Dense(**d_layer['Dense'], input_dim=n_inputs))
# Is this the last output layer? If so, set units to prediction days
elif idx_layer == (len(Recurrent_Neural_Network)-1):
model.add(Dense(**d_layer['Dense'], units=n_days))
else:
model.add(Dense(**d_layer['Dense']))
# Dropout (Regularization)
elif str(*d_layer) == 'Dropout':
model.add(Dropout(**d_layer['Dropout']))
else:
print(f"Incorrect neuron type: {str(*d_layer)}")
return model
# -------------------------------------------------- MLP --------------------------------------------------
def mlp(l_args, s_ticker, s_interval, df_stock):
parser = argparse.ArgumentParser(prog='mlp',
description="""Multilayer Perceptron. """)
parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5,
help='prediction days.')
parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40,
help='number of days to use for prediction.')
parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200,
help='number of training epochs.')
parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1,
help='number of jumps in training data.')
parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization',
choices=['normalization', 'standardization', 'none'], help='pre-processing data.')
parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam',
choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd'], help='optimization technique.')
parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae',
choices=['mae', 'mape', 'mse', 'msle'], help='loss function.')
try:
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
# Pre-process data
if ns_parser.s_preprocessing == 'standardization':
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
elif ns_parser.s_preprocessing == 'normalization':
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
else: # No pre-processing
stock_train_data = np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1]))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1]))
# Build Neural Network model
model = build_neural_network_model(cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1);
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs), verbose=0)
# Re-scale the data back
if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price')
# Plotting
plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3)
plt.title(f"MLP on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1])
plt.xlabel('Time')
plt.ylabel('Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--')
plt.plot(df_pred.index, df_pred, lw=2, c='tab:green')
plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2)
xmin, xmax, ymin, ymax = plt.axis()
plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k')
plt.show()
# Print prediction data
print("Predicted share price:")
df_pred = df_pred.apply(lambda x: f"{x:.2f} $")
print(df_pred.to_string())
print("")
except:
print("")
# -------------------------------------------------- RNN --------------------------------------------------
def rnn(l_args, s_ticker, s_interval, df_stock):
parser = argparse.ArgumentParser(prog='rnn',
description="""Recurrent Neural Network. """)
parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5,
help='prediction days.')
parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40,
help='number of days to use for prediction.')
parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200,
help='number of training epochs.')
parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1,
help='number of jumps in training data.')
parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization',
choices=['normalization', 'standardization', 'none'], help='pre-processing data.')
parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam', help='optimizer technique',
choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd'])
parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae',
choices=['mae', 'mape', 'mse', 'msle'], help='loss function.')
try:
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
# Pre-process data
if ns_parser.s_preprocessing == 'standardization':
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
elif ns_parser.s_preprocessing == 'normalization':
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
else: # No pre-processing
stock_train_data = np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))
# Build Neural Network model
model = build_neural_network_model(cfg_nn_models.Recurrent_Neural_Network, ns_parser.n_inputs, ns_parser.n_days)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1);
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs, 1), verbose=0)
# Re-scale the data back
if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price')
# Plotting
plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3)
plt.title(f"RNN on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1])
plt.xlabel('Time')
plt.ylabel('Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--')
plt.plot(df_pred.index, df_pred, lw=2, c='tab:green')
plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2)
xmin, xmax, ymin, ymax = plt.axis()
plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k')
plt.show()
# Print prediction data
print("Predicted share price:")
df_pred = df_pred.apply(lambda x: f"{x:.2f} $")
print(df_pred.to_string())
print("")
except:
print("")
# -------------------------------------------------- LSTM --------------------------------------------------
def lstm(l_args, s_ticker, s_interval, df_stock):
parser = argparse.ArgumentParser(prog='lstm',
description="""Long-Short Term Memory. """)
parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=5,
help='prediction days')
parser.add_argument('-i', "--input", action="store", dest="n_inputs", type=check_positive, default=40,
help='number of days to use for prediction.')
parser.add_argument('-e', "--epochs", action="store", dest="n_epochs", type=check_positive, default=200,
help='number of training epochs.')
parser.add_argument('-j', "--jumps", action="store", dest="n_jumps", type=check_positive, default=1,
help='number of jumps in training data.')
parser.add_argument('-p', "--pp", action="store", dest="s_preprocessing", default='normalization',
choices=['normalization', 'standardization', 'none'], help='pre-processing data.')
parser.add_argument('-o', "--optimizer", action="store", dest="s_optimizer", default='adam', help='optimization technique.',
choices=['adam', 'adagrad', 'adadelta', 'adamax', 'ftrl', 'nadam', 'optimizer', 'rmsprop', 'sgd'])
parser.add_argument('-l', "--loss", action="store", dest="s_loss", default='mae',
choices=['mae', 'mape', 'mse', 'msle'], help='loss function.')
try:
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
# Pre-process data
if ns_parser.s_preprocessing == 'standardization':
scaler = StandardScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
elif ns_parser.s_preprocessing == 'normalization':
scaler = MinMaxScaler()
stock_train_data = scaler.fit_transform(np.array(df_stock['5. adjusted close'].values.reshape(-1, 1)))
else: # No pre-processing
stock_train_data = np.array(df_stock['5. adjusted close'].values.reshape(-1, 1))
# Split training data for the neural network
stock_x, stock_y = splitTrain.split_train(stock_train_data, ns_parser.n_inputs, ns_parser.n_days, numJumps=ns_parser.n_jumps)
stock_x = np.array(stock_x)
stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))
stock_y = np.array(stock_y)
stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))
# Build Neural Network model
model = build_neural_network_model(cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days)
model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)
# Train our model
model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1);
print("")
print(model.summary())
print("")
# Prediction
yhat = model.predict(stock_train_data[-ns_parser.n_inputs:].reshape(1, ns_parser.n_inputs, 1), verbose=0)
# Re-scale the data back
if (ns_parser.s_preprocessing == 'standardization') or (ns_parser.s_preprocessing == 'normalization'):
y_pred_test_t = scaler.inverse_transform(yhat.tolist())
else:
y_pred_test_t = yhat
l_pred_days = get_next_stock_market_days(last_stock_day=df_stock['5. adjusted close'].index[-1], n_next_days=ns_parser.n_days)
df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name='Price')
# Plotting
plt.plot(df_stock.index, df_stock['5. adjusted close'], lw=3)
plt.title(f"LSTM on {s_ticker} - {ns_parser.n_days} days prediction")
plt.xlim(df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1])
plt.xlabel('Time')
plt.ylabel('Share Price ($)')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)
plt.plot([df_stock.index[-1], df_pred.index[0]], [df_stock['5. adjusted close'].values[-1], df_pred.values[0]], lw=1, c='tab:green', linestyle='--')
plt.plot(df_pred.index, df_pred, lw=2, c='tab:green')
plt.axvspan(df_stock.index[-1], df_pred.index[-1], facecolor='tab:orange', alpha=0.2)
xmin, xmax, ymin, ymax = plt.axis()
plt.vlines(df_stock.index[-1], ymin, ymax, colors='k', linewidth=3, linestyle='--', color='k')
plt.show()
# Print prediction data
print("Predicted share price:")
df_pred = df_pred.apply(lambda x: f"{x:.2f} $")
print(df_pred.to_string())
print("")
except:
print("")
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
contrib/zebra/src/java/org/apache/hadoop/zebra/mapreduce/BasicTableOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.zebra.mapreduce;
import java.io.IOException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.zebra.io.BasicTable;
import org.apache.hadoop.zebra.io.TableInserter;
import org.apache.hadoop.zebra.parser.ParseException;
import org.apache.hadoop.zebra.types.Partition;
import org.apache.hadoop.zebra.types.SortInfo;
import org.apache.hadoop.zebra.types.ZebraConf;
import org.apache.hadoop.zebra.types.TypesUtils;
import org.apache.hadoop.zebra.schema.Schema;
import org.apache.hadoop.zebra.tfile.TFile;
import org.apache.pig.data.Tuple;
import org.apache.hadoop.zebra.pig.comparator.*;
/**
* {@link org.apache.hadoop.mapreduce.OutputFormat} class for creating a
* BasicTable.
*
* Usage Example:
* <p>
* In the main program, add the following code.
*
* <pre>
* job.setOutputFormatClass(BasicTableOutputFormat.class);
* Path outPath = new Path("path/to/the/BasicTable");
* BasicTableOutputFormat.setOutputPath(job, outPath);
* BasicTableOutputFormat.setSchema(job, "Name, Age, Salary, BonusPct");
* </pre>
*
* The above code does the following things:
* <UL>
* <LI>Set the output format class to BasicTableOutputFormat.
* <LI>Set the single path to the BasicTable to be created.
* <LI>Set the schema of the BasicTable to be created. In this case, the
* to-be-created BasicTable contains three columns with names "Name", "Age",
* "Salary", "BonusPct".
* </UL>
*
* To create multiple output paths. ZebraOutputPartitoner interface needs to be implemented
* <pre>
* String multiLocs = "commaSeparatedPaths"
* job.setOutputFormatClass(BasicTableOutputFormat.class);
* BasicTableOutputFormat.setMultipleOutputPaths(job, multiLocs);
* job.setOutputFormat(BasicTableOutputFormat.class);
* BasicTableOutputFormat.setSchema(job, "Name, Age, Salary, BonusPct");
* BasicTableOutputFormat.setZebraOutputPartitionClass(
* job, MultipleOutputsTest.OutputPartitionerClass.class);
* </pre>
*
*
* The user ZebraOutputPartitionClass should look like this:
*
* <pre>
*
* static class OutputPartitionerClass implements ZebraOutputPartition {
* @Override
* public int getOutputPartition(BytesWritable key, Tuple value) {
*
* return someIndexInOutputParitionlist0;
* }
*
* </pre>
*
*
* The user Reducer code (or similarly Mapper code if it is a Map-only job)
* should look like the following:
*
* <pre>
* static class MyReduceClass implements Reducer<K, V, BytesWritable, Tuple> {
* // keep the tuple object for reuse.
* Tuple outRow;
* // indices of various fields in the output Tuple.
* int idxName, idxAge, idxSalary, idxBonusPct;
*
* @Override
* public void configure(Job job) {
* Schema outSchema = BasicTableOutputFormat.getSchema(job);
* // create a tuple that conforms to the output schema.
* outRow = TypesUtils.createTuple(outSchema);
* // determine the field indices.
* idxName = outSchema.getColumnIndex("Name");
* idxAge = outSchema.getColumnIndex("Age");
* idxSalary = outSchema.getColumnIndex("Salary");
* idxBonusPct = outSchema.getColumnIndex("BonusPct");
* }
*
* @Override
* public void reduce(K key, Iterator<V> values,
* OutputCollector<BytesWritable, Tuple> output, Reporter reporter)
* throws IOException {
* String name;
* int age;
* int salary;
* double bonusPct;
* // ... Determine the value of the individual fields of the row to be inserted.
* try {
* outRow.set(idxName, name);
* outRow.set(idxAge, new Integer(age));
* outRow.set(idxSalary, new Integer(salary));
* outRow.set(idxBonusPct, new Double(bonusPct));
* output.collect(new BytesWritable(name.getBytes()), outRow);
* }
* catch (ExecException e) {
* // should never happen
* }
* }
*
* @Override
* public void close() throws IOException {
* // no-op
* }
*
* }
* </pre>
*/
public class BasicTableOutputFormat extends OutputFormat<BytesWritable, Tuple> {
/**
* Set the multiple output paths of the BasicTable in JobContext
*
* @param jobContext
* The JobContext object.
* @param commaSeparatedLocations
* The comma separated output paths to the tables.
* The path must either not exist, or must be an empty directory.
* @param theClass
* Zebra output partitioner class
*
* @deprecated Use {@link #setMultipleOutputs(JobContext, Class<? extends ZebraOutputPartition>, Path ...)} instead.
*
*/
public static void setMultipleOutputs(JobContext jobContext, String commaSeparatedLocations, Class<? extends ZebraOutputPartition> theClass)
throws IOException {
Configuration conf = jobContext.getConfiguration();
ZebraConf.setMultiOutputPath(conf, commaSeparatedLocations);
if (ZebraConf.getIsMulti(conf, true) == false) {
throw new IllegalArgumentException("Job has been setup as single output path");
}
ZebraConf.setIsMulti(conf, true);
setZebraOutputPartitionClass(jobContext, theClass);
}
/**
* Set the multiple output paths of the BasicTable in JobContext
*
* @param jobContext
* The JobContext object.
* @param theClass
* Zebra output partitioner class
* @param paths
* The list of paths
* The path must either not exist, or must be an empty directory.
*/
public static void setMultipleOutputs(JobContext jobContext, Class<? extends ZebraOutputPartition> theClass, Path... paths)
throws IOException {
Configuration conf = jobContext.getConfiguration();
FileSystem fs = FileSystem.get( conf );
Path path = paths[0].makeQualified(fs);
StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString()));
for(int i = 1; i < paths.length;i++) {
str.append(StringUtils.COMMA_STR);
path = paths[i].makeQualified(fs);
str.append(StringUtils.escapeString(path.toString()));
}
ZebraConf.setMultiOutputPath(conf, str.toString());
if (ZebraConf.getIsMulti(conf, true) == false) {
throw new IllegalArgumentException("Job has been setup as single output path");
}
ZebraConf.setIsMulti(conf, true);
setZebraOutputPartitionClass(jobContext, theClass);
}
/**
* Set the multiple output paths of the BasicTable in JobContext
*
* @param jobContext
* The JobContext object.
* @param theClass
* Zebra output partitioner class
* @param arguments
* Arguments string to partitioner class
* @param paths
* The list of paths
* The path must either not exist, or must be an empty directory.
*/
public static void setMultipleOutputs(JobContext jobContext, Class<? extends ZebraOutputPartition> theClass, String arguments, Path... paths)
throws IOException {
setMultipleOutputs(jobContext, theClass, paths);
if (arguments != null) {
ZebraConf.setOutputPartitionClassArguments(jobContext.getConfiguration(), arguments);
}
}
/**
* Get the output partition class arguments string from job configuration
*
* @param conf
* The job configuration object.
* @return the output partition class arguments string.
*/
public static String getOutputPartitionClassArguments(Configuration conf) {
return ZebraConf.getOutputPartitionClassArguments(conf);
}
/**
* Get the multiple output paths of the BasicTable from JobContext
*
* @param jobContext
* The JobContext object.
* @return The output paths to the tables.
* Each path must either not exist, or must be an empty directory.
*/
public static Path[] getOutputPaths(JobContext jobContext)
throws IOException {
Configuration conf = jobContext.getConfiguration();
Path[] result;
String paths = ZebraConf.getMultiOutputPath(conf);
String path = ZebraConf.getOutputPath(conf);
if(paths != null && path != null) {
throw new IllegalArgumentException("Illegal output paths specs. Both multi and single output locs are set");
}
if (ZebraConf.getIsMulti(conf, false) == true) {
if (paths == null || paths.equals("")) {
throw new IllegalArgumentException("Illegal multi output paths");
}
String [] list = StringUtils.split(paths);
result = new Path[list.length];
for (int i = 0; i < list.length; i++) {
result[i] = new Path(StringUtils.unEscapeString(list[i]));
}
} else {
if (path == null || path.equals("")) {
throw new IllegalArgumentException("Cannot find output path");
}
result = new Path[1];
result[0] = new Path(path);
}
return result;
}
private static void setZebraOutputPartitionClass(
JobContext jobContext, Class<? extends ZebraOutputPartition> theClass) throws IOException {
if (!ZebraOutputPartition.class.isAssignableFrom(theClass))
throw new IOException(theClass+" not "+ZebraOutputPartition.class.getName());
ZebraConf.setZebraOutputPartitionerClass(jobContext.getConfiguration(), theClass.getName());
}
public static Class<? extends ZebraOutputPartition> getZebraOutputPartitionClass(JobContext jobContext) throws IOException {
Configuration conf = jobContext.getConfiguration();
Class<?> theClass;
String valueString = ZebraConf.getZebraOutputPartitionerClass(conf);
if (valueString == null)
throw new IOException("zebra output partitioner class not found");
try {
theClass = conf.getClassByName(valueString);
} catch (ClassNotFoundException e) {
throw new IOException(e);
}
if (theClass != null && !ZebraOutputPartition.class.isAssignableFrom(theClass))
throw new IOException(theClass+" not "+ZebraOutputPartition.class.getName());
else if (theClass != null)
return theClass.asSubclass(ZebraOutputPartition.class);
else
return null;
}
/**
* Set the output path of the BasicTable in JobContext
*
* @param jobContext
* The JobContext object.
* @param path
* The output path to the table. The path must either not exist,
* or must be an empty directory.
*/
public static void setOutputPath(JobContext jobContext, Path path) {
Configuration conf = jobContext.getConfiguration();
ZebraConf.setOutputPath(conf, path.toString());
if (ZebraConf.getIsMulti(conf, false) == true) {
throw new IllegalArgumentException("Job has been setup as multi output paths");
}
ZebraConf.setIsMulti(conf, false);
}
/**
* Get the output path of the BasicTable from JobContext
*
* @param jobContext
* jobContext object
* @return The output path.
*/
public static Path getOutputPath(JobContext jobContext) {
Configuration conf = jobContext.getConfiguration();
String path = ZebraConf.getOutputPath(conf);
return (path == null) ? null : new Path(path);
}
/**
* Set the table schema in JobContext
*
* @param jobContext
* The JobContext object.
* @param schema
* The schema of the BasicTable to be created. For the initial
* implementation, the schema string is simply a comma separated list
* of column names, such as "Col1, Col2, Col3".
*
* @deprecated Use {@link #setStorageInfo(JobContext, ZebraSchema, ZebraStorageHint, ZebraSortInfo)} instead.
*/
public static void setSchema(JobContext jobContext, String schema) {
Configuration conf = jobContext.getConfiguration();
ZebraConf.setOutputSchema(conf, Schema.normalize(schema));
// This is to turn off type check for potential corner cases - for internal use only;
if (System.getenv("zebra_output_checktype")!= null && System.getenv("zebra_output_checktype").equals("no")) {
ZebraConf.setCheckType(conf, false);
}
}
/**
* Get the table schema in JobContext.
*
* @param jobContext
* The JobContext object.
* @return The output schema of the BasicTable. If the schema is not defined
* in the jobContext object at the time of the call, null will be returned.
*/
public static Schema getSchema(JobContext jobContext) throws ParseException {
Configuration conf = jobContext.getConfiguration();
String schema = ZebraConf.getOutputSchema(conf);
if (schema == null) {
return null;
}
//schema = schema.replaceAll(";", ",");
return new Schema(schema);
}
private static KeyGenerator makeKeyBuilder(byte[] elems) {
ComparatorExpr[] exprs = new ComparatorExpr[elems.length];
for (int i = 0; i < elems.length; ++i) {
exprs[i] = ExprUtils.primitiveComparator(i, elems[i]);
}
return new KeyGenerator(ExprUtils.tupleComparator(exprs));
}
/**
* Generates a zebra specific sort key generator which is used to generate BytesWritable key
* Sort Key(s) are used to generate this object
*
* @param jobContext
* The JobContext object.
* @return Object of type zebra.pig.comaprator.KeyGenerator.
*
*/
public static Object getSortKeyGenerator(JobContext jobContext) throws IOException, ParseException {
SortInfo sortInfo = getSortInfo( jobContext );
Schema schema = getSchema(jobContext);
String[] sortColNames = sortInfo.getSortColumnNames();
byte[] types = new byte[sortColNames.length];
for(int i =0 ; i < sortColNames.length; ++i){
types[i] = schema.getColumn(sortColNames[i]).getType().pigDataType();
}
KeyGenerator builder = makeKeyBuilder(types);
return builder;
}
/**
* Generates a BytesWritable key for the input key
* using keygenerate provided. Sort Key(s) are used to generate this object
*
* @param builder
* Opaque key generator created by getSortKeyGenerator() method
* @param t
* Tuple to create sort key from
* @return ByteWritable Key
*
*/
public static BytesWritable getSortKey(Object builder, Tuple t) throws Exception {
KeyGenerator kg = (KeyGenerator) builder;
return kg.generateKey(t);
}
/**
* Set the table storage hint in JobContext, should be called after setSchema is
* called.
* <br> <br>
*
* Note that the "secure by" feature is experimental now and subject to
* changes in the future.
*
* @param jobContext
* The JobContext object.
* @param storehint
* The storage hint of the BasicTable to be created. The format would
* be like "[f1, f2.subfld]; [f3, f4]".
*
* @deprecated Use {@link #setStorageInfo(JobContext, ZebraSchema, ZebraStorageHint, ZebraSortInfo)} instead.
*/
public static void setStorageHint(JobContext jobContext, String storehint) throws ParseException, IOException {
Configuration conf = jobContext.getConfiguration();
String schema = ZebraConf.getOutputSchema(conf);
if (schema == null)
throw new ParseException("Schema has not been set");
// for sanity check purpose only
new Partition(schema, storehint, null);
ZebraConf.setOutputStorageHint(conf, storehint);
}
/**
* Get the table storage hint in JobContext.
*
* @param jobContext
* The JobContext object.
* @return The storage hint of the BasicTable. If the storage hint is not
* defined in the jobContext object at the time of the call, an empty string
* will be returned.
*/
public static String getStorageHint(JobContext jobContext) {
Configuration conf = jobContext.getConfiguration();
String storehint = ZebraConf.getOutputStorageHint(conf);
return storehint == null ? "" : storehint;
}
/**
* Set the sort info
*
* @param jobContext
* The JobContext object.
*
* @param sortColumns
* Comma-separated sort column names
*
* @param comparatorClass
* comparator class name; null for default
*
* @deprecated Use {@link #setStorageInfo(JobContext, ZebraSchema, ZebraStorageHint, ZebraSortInfo)} instead.
*/
public static void setSortInfo(JobContext jobContext, String sortColumns, Class<? extends RawComparator<Object>> comparatorClass) {
Configuration conf = jobContext.getConfiguration();
ZebraConf.setOutputSortColumns(conf, sortColumns);
if (comparatorClass != null)
ZebraConf.setOutputComparator(conf, TFile.COMPARATOR_JCLASS+comparatorClass.getName());
}
/**
* Set the sort info
*
* @param jobContext
* The JobContext object.
*
* @param sortColumns
* Comma-separated sort column names
*
* @deprecated Use {@link #setStorageInfo(JobContext, ZebraSchema, ZebraStorageHint, ZebraSortInfo)} instead.
*/
public static void setSortInfo(JobContext jobContext, String sortColumns) {
ZebraConf.setOutputSortColumns(jobContext.getConfiguration(), sortColumns);
}
/**
* Set the table storage info including ZebraSchema,
*
* @param jobContext
* The JobContext object.
*
* @param zSchema The ZebraSchema object containing schema information.
*
* @param zStorageHint The ZebraStorageHint object containing storage hint information.
*
* @param zSortInfo The ZebraSortInfo object containing sorting information.
*
*/
public static void setStorageInfo(JobContext jobContext, ZebraSchema zSchema, ZebraStorageHint zStorageHint, ZebraSortInfo zSortInfo)
throws ParseException, IOException {
String schemaStr = null;
String storageHintStr = null;
/* validity check on schema*/
if (zSchema == null) {
throw new IllegalArgumentException("ZebraSchema object cannot be null.");
} else {
schemaStr = zSchema.toString();
}
Schema schema = null;
try {
schema = new Schema(schemaStr);
} catch (ParseException e) {
throw new ParseException("[" + zSchema + "] " + " is not a valid schema string: " + e.getMessage());
}
/* validity check on storage hint*/
if (zStorageHint == null) {
storageHintStr = "";
} else {
storageHintStr = zStorageHint.toString();
}
try {
new Partition(schemaStr, storageHintStr, null);
} catch (ParseException e) {
throw new ParseException("[" + zStorageHint + "] " + " is not a valid storage hint string: " + e.getMessage() );
} catch (IOException e) {
throw new ParseException("[" + zStorageHint + "] " + " is not a valid storage hint string: " + e.getMessage() );
}
Configuration conf = jobContext.getConfiguration();
ZebraConf.setOutputSchema(conf, schemaStr);
ZebraConf.setOutputStorageHint(conf, storageHintStr);
/* validity check on sort info if user specifies it */
if (zSortInfo != null) {
String sortColumnsStr = zSortInfo.getSortColumns();
String comparatorStr = zSortInfo.getComparator();
/* Check existence of comparable class if user specifies it */
if (comparatorStr != null && !comparatorStr.isEmpty()) {
try {
conf.getClassByName(comparatorStr.substring(TFile.COMPARATOR_JCLASS.length()).trim());
} catch (ClassNotFoundException e) {
throw new IOException("comparator Class cannot be found : " + e.getMessage());
}
}
try {
SortInfo.parse(sortColumnsStr, schema, comparatorStr);
} catch (IOException e) {
throw new IOException("[" + sortColumnsStr + " + " + comparatorStr + "] "
+ "is not a valid sort configuration: " + e.getMessage());
}
if (sortColumnsStr != null)
ZebraConf.setOutputSortColumns(conf, sortColumnsStr);
if (comparatorStr != null)
ZebraConf.setOutputComparator(conf, comparatorStr);
}
}
/**
* Get the SortInfo object
*
* @param jobContext
* The JobContext object.
* @return SortInfo object; null if the Zebra table is unsorted
*
*/
public static SortInfo getSortInfo(JobContext jobContext)throws IOException
{
Configuration conf = jobContext.getConfiguration();
String sortColumns = ZebraConf.getOutputSortColumns(conf);
if (sortColumns == null)
return null;
Schema schema = null;
try {
schema = getSchema(jobContext);
} catch (ParseException e) {
throw new IOException("Schema parsing failure : "+e.getMessage());
}
if (schema == null)
throw new IOException("Schema not defined");
String comparator = getComparator(jobContext);
return SortInfo.parse(sortColumns, schema, comparator);
}
/**
* Get the comparator for sort columns
*
* @param jobContext
* The JobContext object.
* @return comparator String
*
*/
private static String getComparator(JobContext jobContext)
{
return ZebraConf.getOutputComparator(jobContext.getConfiguration());
}
/**
* Get the output table as specified in JobContext. It is useful for applications
* to add more meta data after all rows have been added to the table.
*
* @param jobContext
* The JobContext object.
* @return The output BasicTable.Writer objects, one per output path.
* @throws IOException
*/
private static BasicTable.Writer[] getOutput(JobContext jobContext) throws IOException {
Path[] paths = getOutputPaths(jobContext);
BasicTable.Writer[] writers = new BasicTable.Writer[paths.length];
for(int i = 0; i < paths.length; i++) {
writers[i] = new BasicTable.Writer(paths[i], jobContext.getConfiguration());
}
return writers;
}
/**
* Note: we perform the Initialization of the table here. So we expect this to
* be called before
* {@link BasicTableOutputFormat#getRecordWriter(FileSystem, JobContext, String, Progressable)}
*
* @see OutputFormat#checkOutputSpecs(JobContext)
*/
@Override
public void checkOutputSpecs(JobContext jobContext)
throws IOException {
Configuration conf = jobContext.getConfiguration();
String schema = ZebraConf.getOutputSchema(conf);
if (schema == null) {
throw new IllegalArgumentException("Cannot find output schema");
}
String storehint, sortColumns, comparator;
storehint = getStorageHint(jobContext);
sortColumns = (getSortInfo(jobContext) == null ? null : SortInfo.toSortString(getSortInfo(jobContext).getSortColumnNames()));
comparator = getComparator( jobContext );
Path[] paths = getOutputPaths(jobContext);
for (Path path : paths) {
BasicTable.Writer writer =
new BasicTable.Writer(path, schema, storehint, sortColumns, comparator, conf);
writer.finish();
}
}
/**
* @see OutputFormat#getRecordWriter(TaskAttemptContext)
*/
@Override
public RecordWriter<BytesWritable, Tuple> getRecordWriter(TaskAttemptContext taContext)
throws IOException {
String path = ZebraConf.getOutputPath(taContext.getConfiguration());
return new TableRecordWriter(path, taContext);
}
/**
* Close the output BasicTable, No more rows can be added into the table. A
* BasicTable is not visible for reading until it is "closed".
*
* @param jobContext
* The JobContext object.
* @throws IOException
*/
public static void close(JobContext jobContext) throws IOException {
BasicTable.Writer tables[] = getOutput(jobContext);
for(int i =0; i < tables.length; ++i) {
tables[i].close();
}
}
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext taContext)
throws IOException, InterruptedException {
return new TableOutputCommitter( taContext ) ;
}
}
class TableOutputCommitter extends OutputCommitter {
public TableOutputCommitter(TaskAttemptContext taContext) {
}
@Override
public void abortTask(TaskAttemptContext taContext) throws IOException {
}
@Override
public void cleanupJob(JobContext jobContext) throws IOException {
}
@Override
public void commitTask(TaskAttemptContext taContext) throws IOException {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taContext)
throws IOException {
return false;
}
@Override
public void setupJob(JobContext jobContext) throws IOException {
// TODO Auto-generated method stub
}
@Override
public void setupTask(TaskAttemptContext taContext) throws IOException {
// TODO Auto-generated method stub
}
}
/**
* Adaptor class for BasicTable RecordWriter.
*/
class TableRecordWriter extends RecordWriter<BytesWritable, Tuple> {
private final TableInserter inserter[];
private org.apache.hadoop.zebra.mapreduce.ZebraOutputPartition op = null;
// for Pig's call path;
final private BytesWritable KEY0 = new BytesWritable(new byte[0]);
private int[] sortColIndices = null;
private KeyGenerator builder = null;
private Tuple t = null;
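// The constructor opens one inserter per configured output path and, for sorted tables, caches the
// sort column indices and a key generator so that keys can be derived from tuples at write time.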
public TableRecordWriter(String path, TaskAttemptContext context) throws IOException {
Configuration conf = context.getConfiguration();
if(ZebraConf.getIsMulti(conf, false) == true) {
op = (org.apache.hadoop.zebra.mapreduce.ZebraOutputPartition)
ReflectionUtils.newInstance(BasicTableOutputFormat.getZebraOutputPartitionClass(context), conf);
}
boolean checkType = ZebraConf.getCheckType(conf, true);
Path [] paths = BasicTableOutputFormat.getOutputPaths(context);
inserter = new TableInserter[paths.length];
String inserterName = "part-" + context.getTaskAttemptID().getTaskID().getId();
for(int i = 0; i < paths.length; ++i) {
BasicTable.Writer writer =
new BasicTable.Writer(paths[i], conf);
this.inserter[i] = writer.getInserter( inserterName, true, checkType);
// Set up SortInfo related stuff only once;
if (i == 0) {
if (writer.getSortInfo() != null)
{
sortColIndices = writer.getSortInfo().getSortIndices();
SortInfo sortInfo = writer.getSortInfo();
String[] sortColNames = sortInfo.getSortColumnNames();
org.apache.hadoop.zebra.schema.Schema schema = writer.getSchema();
byte[] types = new byte[sortColNames.length];
for(int j =0 ; j < sortColNames.length; ++j){
types[j] = schema.getColumn(sortColNames[j]).getType().pigDataType();
}
t = TypesUtils.createTuple(sortColNames.length);
builder = makeKeyBuilder(types);
}
}
}
}
private KeyGenerator makeKeyBuilder(byte[] elems) {
ComparatorExpr[] exprs = new ComparatorExpr[elems.length];
for (int i = 0; i < elems.length; ++i) {
exprs[i] = ExprUtils.primitiveComparator(i, elems[i]);
}
return new KeyGenerator(ExprUtils.tupleComparator(exprs));
}
@Override
public void close(TaskAttemptContext context) throws IOException {
for(int i = 0; i < this.inserter.length; ++i) {
inserter[i].close();
}
}
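// Writes one row: if no key is supplied (Pig's call path) and the table is sorted, a key is
// generated from the row's sort columns; the row is then routed through the output partitioner,
// if one is configured, otherwise it goes to the single table inserter.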
@Override
public void write(BytesWritable key, Tuple value) throws IOException {
if (key == null) {
if (sortColIndices != null) { // If this is a sorted table and key is null (Pig's call path);
for (int i =0; i < sortColIndices.length;++i) {
t.set(i, value.get(sortColIndices[i]));
}
key = builder.generateKey(t);
} else { // for unsorted table;
key = KEY0;
}
}
if(op != null ) {
int idx = op.getOutputPartition(key, value);
if(idx < 0 || (idx >= inserter.length)) {
throw new IllegalArgumentException("index returned by getOutputPartition is out of range");
}
inserter[idx].insert(key, value);
} else {
inserter[0].insert(key, value);
}
}
}
|
[
"\"zebra_output_checktype\"",
"\"zebra_output_checktype\""
] |
[] |
[
"zebra_output_checktype"
] |
[]
|
["zebra_output_checktype"]
|
java
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'momeback.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
ipawac_assistant/assistant/plugins/utilities/paths.py
|
# -*- coding: utf-8-*-
import os
# Jasper main directory
APP_PATH = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir, os.pardir, os.pardir))
CONFIG_PATH = os.path.expanduser(os.getenv('IPAWAC_CONFIG'))
PLUGIN_PATH = os.path.normpath(os.path.join(APP_PATH, 'assistant'))
LIB_PATH = os.path.join(APP_PATH, "client")
MODULE_PATH = os.path.join(APP_PATH, "assistant-modules")
DATA_PATH = os.path.join(APP_PATH, "static")
APP_CREDENTIALS_PATH = os.path.join(
LIB_PATH, "application-credentials")
USER_CREDENTIALS_PATH = CONFIG_PATH
AUDIO_PATH = os.path.join(DATA_PATH, "audio")
RETHINKDB_DATA_PATH = os.path.join(DATA_PATH, "rethinkDB")
MODELS_PATH = os.path.join(DATA_PATH, "models")
SNOWBOY_MODEL_PATH = os.path.join(MODELS_PATH, "snowboy-models")
HAAR_PATH = os.path.join(MODELS_PATH, "haarcascades")
FRONT_CASCADE_PATH = os.path.join(
HAAR_PATH, "haarcascade_frontalface_default.xml")
FRONT_LPB_CASCADE_PATH = os.path.join(
HAAR_PATH, "lbpcascade_frontalface_improved.xml")
RIGHT_LPB_CASCADE_PATH = os.path.join(HAAR_PATH, "lbpcascade_profileface.xml")
FACE_MODELS = os.path.join(MODELS_PATH, 'face-models')
FACE_FISCHER_MODEL = os.path.join(FACE_MODELS, 'fisher_trained_data.xml')
folder_paths = [APP_PATH, CONFIG_PATH,
PLUGIN_PATH, LIB_PATH,
DATA_PATH, MODULE_PATH,
USER_CREDENTIALS_PATH, AUDIO_PATH,
RETHINKDB_DATA_PATH, MODELS_PATH,
SNOWBOY_MODEL_PATH, HAAR_PATH,
FRONT_CASCADE_PATH, FRONT_LPB_CASCADE_PATH,
RIGHT_LPB_CASCADE_PATH, FACE_MODELS,
FACE_FISCHER_MODEL, '', ]
# for d in folder_paths:
# print(d)
# if not os.path.isdir(d):
# print(d + " does not exist")
if not os.path.isdir(FACE_MODELS):
os.makedirs(FACE_MODELS)
if not os.path.isfile(FACE_FISCHER_MODEL):
open(FACE_FISCHER_MODEL, 'w+').close()
IMAGES_FOLDER = os.path.join(DATA_PATH, "images")
FACES_PATH = os.path.join(IMAGES_FOLDER, "faces")
WEATHER_ICONS = os.path.join(IMAGES_FOLDER, "weather-icons-png")
if not os.path.isdir(RETHINKDB_DATA_PATH):
os.makedirs(RETHINKDB_DATA_PATH)
if not os.path.isdir(FACES_PATH):
os.makedirs(FACES_PATH)
if not os.path.isdir(WEATHER_ICONS):
os.makedirs(WEATHER_ICONS)
def config(*fname):
return os.path.join(CONFIG_PATH, *fname)
def data(*fname):
return os.path.join(DATA_PATH, *fname)
|
[] |
[] |
[
"IPAWAC_CONFIG"
] |
[]
|
["IPAWAC_CONFIG"]
|
python
| 1 | 0 | |
rorow/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rorow.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
lab187/vendor/github.com/elastic/go-elasticsearch/esapi/api.indices.clear_cache.go
|
// Code generated from specification version 7.1.0 (8dc8fc507d9): DO NOT EDIT
package esapi
import (
"context"
"strconv"
"strings"
)
func newIndicesClearCacheFunc(t Transport) IndicesClearCache {
return func(o ...func(*IndicesClearCacheRequest)) (*Response, error) {
var r = IndicesClearCacheRequest{}
for _, f := range o {
f(&r)
}
return r.Do(r.ctx, t)
}
}
// ----- API Definition -------------------------------------------------------
// IndicesClearCache clears all or specific caches for one or more indices.
//
// See full documentation at http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html.
//
type IndicesClearCache func(o ...func(*IndicesClearCacheRequest)) (*Response, error)
// IndicesClearCacheRequest configures the Indices Clear Cache API request.
//
type IndicesClearCacheRequest struct {
Index []string
AllowNoIndices *bool
ExpandWildcards string
Fielddata *bool
Fields []string
IgnoreUnavailable *bool
Query *bool
Request *bool
Pretty bool
Human bool
ErrorTrace bool
FilterPath []string
ctx context.Context
}
// Do executes the request and returns response or error.
//
func (r IndicesClearCacheRequest) Do(ctx context.Context, transport Transport) (*Response, error) {
var (
method string
path strings.Builder
params map[string]string
)
method = "POST"
path.Grow(1 + len(strings.Join(r.Index, ",")) + 1 + len("_cache") + 1 + len("clear"))
if len(r.Index) > 0 {
path.WriteString("/")
path.WriteString(strings.Join(r.Index, ","))
}
path.WriteString("/")
path.WriteString("_cache")
path.WriteString("/")
path.WriteString("clear")
params = make(map[string]string)
if r.AllowNoIndices != nil {
params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices)
}
if r.ExpandWildcards != "" {
params["expand_wildcards"] = r.ExpandWildcards
}
if r.Fielddata != nil {
params["fielddata"] = strconv.FormatBool(*r.Fielddata)
}
if len(r.Fields) > 0 {
params["fields"] = strings.Join(r.Fields, ",")
}
if r.IgnoreUnavailable != nil {
params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable)
}
if len(r.Index) > 0 {
params["index"] = strings.Join(r.Index, ",")
}
if r.Query != nil {
params["query"] = strconv.FormatBool(*r.Query)
}
if r.Request != nil {
params["request"] = strconv.FormatBool(*r.Request)
}
if r.Pretty {
params["pretty"] = "true"
}
if r.Human {
params["human"] = "true"
}
if r.ErrorTrace {
params["error_trace"] = "true"
}
if len(r.FilterPath) > 0 {
params["filter_path"] = strings.Join(r.FilterPath, ",")
}
req, _ := newRequest(method, path.String(), nil)
if len(params) > 0 {
q := req.URL.Query()
for k, v := range params {
q.Set(k, v)
}
req.URL.RawQuery = q.Encode()
}
if ctx != nil {
req = req.WithContext(ctx)
}
res, err := transport.Perform(req)
if err != nil {
return nil, err
}
response := Response{
StatusCode: res.StatusCode,
Body: res.Body,
Header: res.Header,
}
return &response, nil
}
// WithContext sets the request context.
//
func (f IndicesClearCache) WithContext(v context.Context) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.ctx = v
}
}
// WithIndex - a list of index name to limit the operation.
//
func (f IndicesClearCache) WithIndex(v ...string) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Index = v
}
}
// WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified).
//
func (f IndicesClearCache) WithAllowNoIndices(v bool) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.AllowNoIndices = &v
}
}
// WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both..
//
func (f IndicesClearCache) WithExpandWildcards(v string) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.ExpandWildcards = v
}
}
// WithFielddata - clear field data.
//
func (f IndicesClearCache) WithFielddata(v bool) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Fielddata = &v
}
}
// WithFields - a list of fields to clear when using the `fielddata` parameter (default: all).
//
func (f IndicesClearCache) WithFields(v ...string) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Fields = v
}
}
// WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed).
//
func (f IndicesClearCache) WithIgnoreUnavailable(v bool) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.IgnoreUnavailable = &v
}
}
// WithQuery - clear query caches.
//
func (f IndicesClearCache) WithQuery(v bool) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Query = &v
}
}
// WithRequest - clear request cache.
//
func (f IndicesClearCache) WithRequest(v bool) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Request = &v
}
}
// WithPretty makes the response body pretty-printed.
//
func (f IndicesClearCache) WithPretty() func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Pretty = true
}
}
// WithHuman makes statistical values human-readable.
//
func (f IndicesClearCache) WithHuman() func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.Human = true
}
}
// WithErrorTrace includes the stack trace for errors in the response body.
//
func (f IndicesClearCache) WithErrorTrace() func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.ErrorTrace = true
}
}
// WithFilterPath filters the properties of the response body.
//
func (f IndicesClearCache) WithFilterPath(v ...string) func(*IndicesClearCacheRequest) {
return func(r *IndicesClearCacheRequest) {
r.FilterPath = v
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
alexa/ragglach17-skill-function/environment.py
|
import os
DYNAMODB_TABLE_NAME = os.getenv("DYNAMODB_TABLE_NAME")
POOL_TEMPERATURE_SENSOR_ID = os.getenv("POOL_TEMPERATURE_SENSOR_ID")
|
[] |
[] |
[
"POOL_TEMPERATURE_SENSOR_ID",
"DYNAMODB_TABLE_NAME"
] |
[]
|
["POOL_TEMPERATURE_SENSOR_ID", "DYNAMODB_TABLE_NAME"]
|
python
| 2 | 0 | |
db.go
|
package db
import (
"archive/tar"
"bytes"
"encoding/csv"
"index/suffixarray"
"io"
"log"
"os"
"regexp"
"sort"
"strings"
"github.com/ekzhu/minhash-lsh"
"github.com/sergi/go-diff/diffmatchpatch"
"github.com/madsflensted/go-license-db/assets"
"github.com/madsflensted/go-license-db/fastlog"
"github.com/madsflensted/go-license-db/normalize"
"github.com/madsflensted/go-license-db/wmh"
)
// database holds the license texts, their hashes and the hashtables to query for nearest
// neighbors.
type database struct {
debug bool
// license name -> text
licenseTexts map[string]string
// minimum license text length
minLicenseLength int
// official license URLs
urls map[string]string
// all URLs joined
urlRe *regexp.Regexp
// first line of each license OR-ed - used to split
firstLineRe *regexp.Regexp
// unique unigrams -> index
tokens map[string]int
// document frequencies of the unigrams, indexes match with `tokens`
docfreqs []int
// Weighted MinHash hashtables
lsh *minhashlsh.MinhashLSH
// turns a license text into a hash
hasher *wmh.WeightedMinHasher
// part of license short name (e,g, BSL-1.0) -> list of containing license names
nameShortSubstrings map[string][]substring
// number of substrings per short license name
nameShortSubstringSizes map[string]int
// part of license name (e,g, Boost Software License 1.0) -> list of containing license names
nameSubstrings map[string][]substring
// number of substrings per license name
nameSubstringSizes map[string]int
}
type substring struct {
value string
count int
}
const (
numHashes = 154
similarityThreshold = 0.75
)
// Load internal db
func LoadDB() *database {
return loadLicenses()
}
// Length returns the number of registered licenses.
func (db database) Length() int {
return len(db.licenseTexts)
}
// List of known license texts/names
func (db database) LicenseNames() []string {
keys := make([]string, 0, len(db.licenseTexts))
for k := range db.licenseTexts {
keys = append(keys, k)
}
return keys
}
// VocabularySize returns the number of unique unigrams.
func (db database) VocabularySize() int {
return len(db.tokens)
}
func loadUrls(db *database) {
urlCSVBytes, err := assets.Asset("urls.csv")
if err != nil {
log.Fatalf("failed to load urls.csv from the assets: %v", err)
}
urlReader := csv.NewReader(bytes.NewReader(urlCSVBytes))
records, err := urlReader.ReadAll()
if err != nil || len(records) == 0 {
log.Fatalf("failed to parse urls.csv from the assets: %v", err)
}
db.urls = map[string]string{}
urlReWriter := &bytes.Buffer{}
for i, record := range records {
db.urls[record[1]] = record[0]
urlReWriter.Write([]byte(regexp.QuoteMeta(record[1])))
if i < len(records)-1 {
urlReWriter.WriteRune('|')
}
}
db.urlRe = regexp.MustCompile(urlReWriter.String())
}
func loadNames(db *database) {
namesBytes, err := assets.Asset("names.csv")
if err != nil {
log.Fatalf("failed to load banes.csv from the assets: %v", err)
}
namesReader := csv.NewReader(bytes.NewReader(namesBytes))
records, err := namesReader.ReadAll()
if err != nil || len(records) == 0 {
log.Fatalf("failed to parse names.csv from the assets: %v", err)
}
db.nameSubstringSizes = map[string]int{}
db.nameSubstrings = map[string][]substring{}
for _, record := range records {
registerNameSubstrings(record[1], record[0], db.nameSubstringSizes, db.nameSubstrings)
}
}
func registerNameSubstrings(
name string, key string, sizes map[string]int, substrs map[string][]substring) {
parts := splitLicenseName(name)
sizes[key] = 0
for _, part := range parts {
if licenseReadmeRe.MatchString(part.value) {
continue
}
sizes[key]++
list := substrs[part.value]
if list == nil {
list = []substring{}
}
list = append(list, substring{value: key, count: part.count})
substrs[part.value] = list
}
}
// Load takes the licenses from the embedded storage, normalizes, hashes them and builds the
// LSH hashtables.
func loadLicenses() *database {
db := &database{}
if os.Getenv("LICENSE_DEBUG") != "" {
db.debug = true
}
loadUrls(db)
loadNames(db)
tarBytes, err := assets.Asset("licenses.tar")
if err != nil {
log.Fatalf("failed to load licenses.tar from the assets: %v", err)
}
tarStream := bytes.NewBuffer(tarBytes)
archive := tar.NewReader(tarStream)
db.licenseTexts = map[string]string{}
tokenFreqs := map[string]map[string]int{}
firstLineWriter := &bytes.Buffer{}
firstLineWriter.WriteString("(^|\\n)((.*licen[cs]e\\n\\n)|(")
for header, err := archive.Next(); err != io.EOF; header, err = archive.Next() {
if len(header.Name) <= 6 {
continue
}
key := header.Name[2 : len(header.Name)-4]
text := make([]byte, header.Size)
readSize, readErr := archive.Read(text)
if readErr != nil && readErr != io.EOF {
log.Fatalf("failed to load licenses.tar from the assets: %s: %v", header.Name, readErr)
}
if int64(readSize) != header.Size {
log.Fatalf("failed to load licenses.tar from the assets: %s: incomplete read", header.Name)
}
normedText := normalize.LicenseText(string(text), normalize.Moderate)
if db.minLicenseLength == 0 || db.minLicenseLength > len(normedText) {
db.minLicenseLength = len(normedText)
}
db.licenseTexts[key] = normedText
newLinePos := strings.Index(normedText, "\n")
if newLinePos >= 0 {
firstLineWriter.WriteString(regexp.QuoteMeta(normedText[:newLinePos]))
firstLineWriter.WriteRune('|')
}
normedText = normalize.Relax(normedText)
lines := strings.Split(normedText, "\n")
myUniqueTokens := map[string]int{}
tokenFreqs[key] = myUniqueTokens
for _, line := range lines {
tokens := strings.Split(line, " ")
for _, token := range tokens {
myUniqueTokens[token]++
}
}
}
if db.debug {
log.Println("Minimum license length:", db.minLicenseLength)
log.Println("Number of supported licenses:", len(db.licenseTexts))
}
firstLineWriter.Truncate(firstLineWriter.Len() - 1)
firstLineWriter.WriteString("))")
db.firstLineRe = regexp.MustCompile(firstLineWriter.String())
docfreqs := map[string]int{}
for _, tokens := range tokenFreqs {
for token := range tokens {
docfreqs[token]++
}
}
uniqueTokens := make([]string, len(docfreqs))
{
i := 0
for token := range docfreqs {
uniqueTokens[i] = token
i++
}
}
sort.Strings(uniqueTokens)
db.tokens = map[string]int{}
db.docfreqs = make([]int, len(uniqueTokens))
for i, token := range uniqueTokens {
db.tokens[token] = i
db.docfreqs[i] = docfreqs[token]
}
db.lsh = minhashlsh.NewMinhashLSH64(numHashes, similarityThreshold)
if db.debug {
k, l := db.lsh.Params()
log.Println("LSH:", k, l)
}
db.hasher = wmh.NewWeightedMinHasher(len(uniqueTokens), numHashes, 7)
db.nameShortSubstrings = map[string][]substring{}
db.nameShortSubstringSizes = map[string]int{}
for key, tokens := range tokenFreqs {
indices := make([]int, len(tokens))
values := make([]float32, len(tokens))
{
i := 0
for t, freq := range tokens {
indices[i] = db.tokens[t]
values[i] = tfidf(freq, db.docfreqs[indices[i]], len(db.licenseTexts))
i++
}
}
db.lsh.Add(key, db.hasher.Hash(values, indices))
registerNameSubstrings(key, key, db.nameShortSubstringSizes, db.nameShortSubstrings)
}
db.lsh.Index()
return db
}
// QueryLicenseText returns the most similar registered licenses.
func (db *database) QueryLicenseText(text string) map[string]float32 {
parts := normalize.Split(text)
licenses := map[string]float32{}
for _, part := range parts {
for key, val := range db.queryLicenseAbstract(part) {
if licenses[key] < val {
licenses[key] = val
}
}
}
return licenses
}
func (db *database) queryLicenseAbstract(text string) map[string]float32 {
normalizedModerate := normalize.LicenseText(text, normalize.Moderate)
titlePositions := db.firstLineRe.FindAllStringIndex(normalizedModerate, -1)
candidates := db.queryLicenseAbstractNormalized(normalizedModerate)
var prevPos int
var prevMatch string
for i, titlePos := range titlePositions {
begPos := titlePos[0]
match := normalizedModerate[titlePos[0]:titlePos[1]]
if match[0] == '\n' {
match = match[1:]
}
if match == prevMatch {
begPos = prevPos
}
if normalizedModerate[begPos] == '\n' {
begPos++
}
var endPos int
if i < len(titlePositions)-1 {
endPos = titlePositions[i+1][0]
} else {
endPos = len(normalizedModerate)
}
part := normalizedModerate[begPos:endPos]
prevMatch = match
prevPos = begPos
if float64(len(part)) < float64(db.minLicenseLength)*similarityThreshold {
continue
}
newCandidates := db.queryLicenseAbstractNormalized(part)
if len(newCandidates) == 0 {
continue
}
for key, val := range newCandidates {
if candidates[key] < val {
candidates[key] = val
}
}
}
db.addURLMatches(candidates, text)
return candidates
}
func (db *database) addURLMatches(candidates map[string]float32, text string) {
for key := range db.scanForURLs(text) {
if db.debug {
println("URL:", key)
}
if conf := candidates[key]; conf < similarityThreshold {
if conf == 0 {
candidates[key] = 1
} else {
candidates[key] = similarityThreshold
}
}
}
}
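// queryLicenseAbstractNormalized hashes the tokenized text with Weighted MinHash, queries the LSH
// index for candidate licenses, and then scores each candidate with a diff-based Levenshtein
// similarity, dropping weak candidates whenever at least one match exceeds the similarity threshold.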
func (db *database) queryLicenseAbstractNormalized(normalizedModerate string) map[string]float32 {
normalizedRelaxed := normalize.Relax(normalizedModerate)
if db.debug {
println("\nqueryAbstractNormed --------\n")
println(normalizedModerate)
println("\n========\n")
println(normalizedRelaxed)
}
tokens := map[int]int{}
for _, line := range strings.Split(normalizedRelaxed, "\n") {
for _, token := range strings.Split(line, " ") {
if index, exists := db.tokens[token]; exists {
tokens[index]++
}
}
}
indices := make([]int, len(tokens))
values := make([]float32, len(tokens))
{
i := 0
for key, val := range tokens {
indices[i] = key
values[i] = tfidf(val, db.docfreqs[key], len(db.licenseTexts))
i++
}
}
found := db.lsh.Query(db.hasher.Hash(values, indices))
candidates := map[string]float32{}
if len(found) == 0 {
return candidates
}
for _, keyint := range found {
key := keyint.(string)
licenseText := db.licenseTexts[key]
yourRunes := make([]rune, 0, len(licenseText)/6)
vocabulary := map[string]int{}
for _, line := range strings.Split(licenseText, "\n") {
for _, token := range strings.Split(line, " ") {
index, exists := vocabulary[token]
if !exists {
index = len(vocabulary)
vocabulary[token] = index
}
yourRunes = append(yourRunes, rune(index))
}
}
oovRune := rune(len(vocabulary))
myRunes := make([]rune, 0, len(normalizedModerate)/6)
for _, line := range strings.Split(normalizedModerate, "\n") {
for _, token := range strings.Split(line, " ") {
if index, exists := vocabulary[token]; exists {
myRunes = append(myRunes, rune(index))
} else if len(myRunes) == 0 || myRunes[len(myRunes)-1] != oovRune {
myRunes = append(myRunes, oovRune)
}
}
}
dmp := diffmatchpatch.New()
diff := dmp.DiffMainRunes(myRunes, yourRunes, false)
if db.debug {
tokarr := make([]string, len(db.tokens)+1)
for key, val := range vocabulary {
tokarr[val] = key
}
tokarr[len(db.tokens)] = "!"
println(dmp.DiffPrettyText(dmp.DiffCharsToLines(diff, tokarr)))
}
distance := dmp.DiffLevenshtein(diff)
candidates[key] = float32(1) - float32(distance)/float32(len(myRunes))
}
weak := make([]string, 0, len(candidates))
for key, val := range candidates {
if val < similarityThreshold {
weak = append(weak, key)
}
}
if len(weak) < len(candidates) {
for _, key := range weak {
delete(candidates, key)
}
}
return candidates
}
func (db *database) scanForURLs(text string) map[string]bool {
byteText := []byte(text)
index := suffixarray.New(byteText)
urlMatches := index.FindAllIndex(db.urlRe, -1)
licenses := map[string]bool{}
for _, match := range urlMatches {
url := string(byteText[match[0]:match[1]])
licenses[db.urls[url]] = true
}
return licenses
}
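// tfidf computes a log-scaled term-frequency * inverse-document-frequency weight using the fast
// approximate logarithm; negative results caused by the approximation are clamped to zero.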
func tfidf(freq int, docfreq int, ndocs int) float32 {
weight := fastlog.Log(1+float32(freq)) * fastlog.Log(float32(ndocs)/float32(docfreq))
if weight < 0 {
// logarithm is approximate
return 0
}
return weight
}
|
[
"\"LICENSE_DEBUG\""
] |
[] |
[
"LICENSE_DEBUG"
] |
[]
|
["LICENSE_DEBUG"]
|
go
| 1 | 0 | |
appengine/sendgrid/src/main/java/com/example/appengine/sendgrid/SendEmailServlet.java
|
/*
* Copyright 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.appengine.sendgrid;
import com.sendgrid.SendGrid;
import com.sendgrid.SendGridException;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
// [START example]
@SuppressWarnings("serial")
public class SendEmailServlet extends HttpServlet {
@Override
public void service(HttpServletRequest req, HttpServletResponse resp) throws IOException,
ServletException {
final String sendgridApiKey = System.getenv("SENDGRID_API_KEY");
final String sendgridSender = System.getenv("SENDGRID_SENDER");
final String toEmail = req.getParameter("to");
if (toEmail == null) {
resp.getWriter()
.print("Please provide an email address in the \"to\" query string parameter.");
return;
}
SendGrid sendgrid = new SendGrid(sendgridApiKey);
SendGrid.Email email = new SendGrid.Email();
email.addTo(toEmail);
email.setFrom(sendgridSender);
email.setSubject("This is a test email");
email.setText("Example text body.");
try {
SendGrid.Response response = sendgrid.send(email);
if (response.getCode() != 200) {
resp.getWriter().print(String.format("An error occurred: %s", response.getMessage()));
return;
}
resp.getWriter().print("Email sent.");
} catch (SendGridException e) {
throw new ServletException("SendGrid error", e);
}
}
}
// [END example]
|
[
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_SENDER\""
] |
[] |
[
"SENDGRID_API_KEY",
"SENDGRID_SENDER"
] |
[]
|
["SENDGRID_API_KEY", "SENDGRID_SENDER"]
|
java
| 2 | 0 | |
drivers/vmwarefusion/vmrun_darwin.go
|
/*
* Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
*/
package vmwarefusion
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/docker/machine/log"
)
var (
vmrunbin = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
vdiskmanbin = "/Applications/VMware Fusion.app/Contents/Library/vmware-vdiskmanager"
)
var (
ErrMachineExist = errors.New("machine already exists")
ErrMachineNotExist = errors.New("machine does not exist")
ErrVMRUNNotFound = errors.New("VMRUN not found")
)
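// vmrun invokes the VMware Fusion vmrun binary with the given arguments and returns the captured
// stdout, stderr and any execution error (ErrVMRUNNotFound if the binary is missing).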
func vmrun(args ...string) (string, string, error) {
cmd := exec.Command(vmrunbin, args...)
if os.Getenv("DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout, cmd.Stderr = &stdout, &stderr
log.Debugf("executing: %v %v", vmrunbin, strings.Join(args, " "))
err := cmd.Run()
if err != nil {
if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {
err = ErrVMRUNNotFound
}
}
return stdout.String(), stderr.String(), err
}
// Make a vmdk disk image with the given size (in MB).
func vdiskmanager(dest string, size int) error {
cmd := exec.Command(vdiskmanbin, "-c", "-t", "0", "-s", fmt.Sprintf("%dMB", size), "-a", "lsilogic", dest)
if os.Getenv("DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Run(); err != nil {
if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {
return ErrVMRUNNotFound
}
}
return nil
}
|
[
"\"DEBUG\"",
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
back-end/source/elevateme-back-end.py
|
import boto3
import json
from decimal import *
import decimal
import uuid
import time
import ldap3
import os
from base64 import b64decode
from datetime import datetime, timedelta
from botocore.vendored import requests
# Environments
webhook = os.environ['WEBHOOK']
ldap_server = os.environ['LDAPSERVER']
ldap_user = os.environ['LDAPUSER']
ldap_password = os.environ['LDAPPASSWORD']
region = os.environ['REGION']
db_table = os.environ['DBTABLE']
# AWS Details
dynamodb = boto3.resource('dynamodb', region_name=f'{region}')
table = dynamodb.Table(f'{db_table}')
# ###### Get encrypted password from SSM instead of passing in os.environment parameter ########
# ssm_client = boto3.client('ssm')
# x = ssm_client.get_parameter(Name='YOUR_TAG_NAME', WithDecryption=True)
# ldap_password = x['Parameter']['Value']
# Convert slack name to AD admin account name.
allowed_users = {
'matt.tunny': 'Tunny.Admin',
'john.lewis': 'Lewis.Admin',
'john.doh': 'Doh.Admin',
'daniel.bobbie': 'Bobbie.Admin',
'reece.smith': 'Smith.Admin'
}
allowed_groups = {
'Domain Admins', # Domain Admins
'DMZ-Server-Admins', # example group for custom dmz servers
'Schema Admins', # Scheme Admins
'AD-ExchangeSearchAdmins', # example group for exchange search rights.
'AuditServers', # example group for audit servers
'AWS-CloudAdmins' # example group for Cloud Admins
}
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
# Add user from AD Group
def add_user_from_adgroup(ldap_server, ldap_user, ldap_password, user, group):
server = ldap3.Server(f'{ldap_server}')
conn = ldap3.Connection(server, ldap_user, ldap_password, auto_bind=True)
conn.extend.microsoft.add_members_to_groups(f'cn={user},ou=Users,ou=corporate,dc=corporate,dc=internal', f'cn={group},cn=Users,dc=corporate,dc=internal')
conn.unbind()
# Get details from SQS Message
def sqs_event(event):
print('Running SQS Function...')
body = {
"message": "Elevate Me Message...",
"event": event
}
print(json.dumps(body))
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
# Update DynamoDB
def update_dynamodb(event):
print('running update_dynamodb function....')
# Current Time + TTL time to expire dynamodb records after 2 hours + UUID
time_when_elevated = int(time.time())
time_now = datetime.now()
human_time = '{:%H:%M:%S}'.format(time_now)
# Revoke at
revoke_at = time_when_elevated + 3600 #3600
human_revoke_at = time_now + timedelta(hours=1)
revoke_human_time = '{:%H:%M:%S}'.format(human_revoke_at)
random_id = uuid.uuid4()
user = event['Records'][0]['messageAttributes']['User']['stringValue']
ad_user = allowed_users[f'{user}']
adgroup = event['Records'][0]['messageAttributes']['Group']['stringValue']
print(f'User = {ad_user}')
print(f'Group = {adgroup}')
print(f'Time when Elevated = {time_when_elevated}')
print(f'Revoke = {revoke_at}')
# Push DynamoDB
response = table.update_item(
Key={
'Id': f'{random_id}'
},
UpdateExpression="set #user = :user, #adgroup = :adgroup, #time_when_elevated = :time_when_elevated, #revoke_at=:revoke_at, #revoke_at_friendly=:revoke_at_friendly, #elevated_time_friendly=:elevated_time_friendly",
ExpressionAttributeNames={
'#user': 'User',
'#adgroup': 'ADgroup',
'#time_when_elevated': 'TimeWhenElevated',
'#revoke_at': 'RevokeAt',
'#revoke_at_friendly': 'RevokeAtFriendly',
'#elevated_time_friendly': 'ElevatedTimeFriendly'
},
ExpressionAttributeValues={
':user': ad_user,
':adgroup': adgroup,
':time_when_elevated': time_when_elevated,
':revoke_at': revoke_at,
':revoke_at_friendly': revoke_human_time,
':elevated_time_friendly': human_time
},
ReturnValues="UPDATED_NEW"
)
print(json.dumps(response, indent=4, cls=DecimalEncoder))
def lambda_handler(event, context):
# Read SQS event
sqs_event(event)
# Confirm user and group are allowed to be Elevated.
group_on_queue = event['Records'][0]['messageAttributes']['Group']['stringValue']
user_on_queue = event['Records'][0]['messageAttributes']['User']['stringValue']
ad_user_on_queue = allowed_users[f'{user_on_queue}']
if group_on_queue in allowed_groups and user_on_queue in allowed_users.keys():
print('User and group allowed to continue')
# Scan DynamoDB for current Elevated users before adding users (stops spam Elevating)
print('scanning dynamodb table for current elveated users...')
dbresponse = table.scan()
items = dbresponse['Items']
if len(items) > 0:
current_users = []
current_groups = []
current_revoke = []
for i in items:
current_users.append(i['User'])
current_groups.append(i['ADgroup'])
current_revoke.append(i['RevokeAt'])
# Check user isn't already elevated.
if group_on_queue in current_groups and ad_user_on_queue in current_users:
print('skipping as user already in group with time to spare...')
response = requests.post(webhook, data=json.dumps({'text': ad_user_on_queue + ' is already elevated in ' + group_on_queue + ' ....' }))
else:
# User not in table, adding...
print('adding user to group....')
try:
print('Trying to add user to AD group...')
add_user_from_adgroup(ldap_server, ldap_user, ldap_password, ad_user_on_queue, group_on_queue)
response = requests.post(webhook, data=json.dumps({'text': ad_user_on_queue + ' elevated into ' + group_on_queue + ' ....' }))
try:
print('trying to add user to dynamodb...')
update_dynamodb(event)
except Exception as error:
print('Failed to update DynamoDB Table....')
print(error)
response = requests.post(webhook, data=json.dumps({'text': f'{error}' }))
except Exception as error:
print('Failed to Add user to AD Group....')
print(error)
response = requests.post(webhook, data=json.dumps({'text': f'{error}' }))
else:
# Table empty, adding user...
print('DynamoDB Table is empty, elevate new user.')
try:
print('Trying to add user to AD group...')
add_user_from_adgroup(ldap_server, ldap_user, ldap_password, ad_user_on_queue, group_on_queue)
response = requests.post(webhook, data=json.dumps({'text': ad_user_on_queue + ' elevated into ' + group_on_queue + ' ....' }))
try:
print('trying to add user to dynamodb...')
update_dynamodb(event)
except Exception as error:
print('Failed to update DynamoDB Table....')
print(error)
response = requests.post(webhook, data=json.dumps({'text': f'{error}' }))
except Exception as error:
print('Failed to Add user to AD Group....')
print(error)
response = requests.post(webhook, data=json.dumps({'text': f'{error}' }))
else:
# User or Group not on the list baby!
print('user or group not allowed to elevate')
response = requests.post(webhook, data=json.dumps({'text': '*Failed to Elevate* ' + ad_user_on_queue + ' from: ' + group_on_queue + ' ....User or group not in allow list.' }))
|
[] |
[] |
[
"LDAPSERVER",
"WEBHOOK",
"LDAPUSER",
"REGION",
"LDAPPASSWORD",
"DBTABLE"
] |
[]
|
["LDAPSERVER", "WEBHOOK", "LDAPUSER", "REGION", "LDAPPASSWORD", "DBTABLE"]
|
python
| 6 | 0 | |
sample_octopus_frontend_service/src/sample_octopus_frontend/restful/jsoninfo.go
|
// Copyright 2019 The VikingBays(in Nanjing , China) . All rights reserved.
// Released under the Apache license : http://www.apache.org/licenses/LICENSE-2.0 .
//
// authors: VikingBays
// email : [email protected]
package restful
import (
"alphabet/log4go"
"alphabet/service"
"alphabet/web"
"fmt"
"os"
"sample_octopus_api/restful/api"
)
/**
 * Access URL: http://[ip]:[port]/[webcontext]/restful/jsoninfo/0/1000
*
*/
type ParamJson struct {
Min int
Max int
}
func JsonInfo(paramJson *ParamJson, context *web.Context) {
log4go.InfoLog("paramJson=%v", paramJson)
if paramJson != nil {
userInfoRespBody := api.UserInfoRespBody{}
paramJson_Req := api.ParamJson_Req{Min: paramJson.Min, Max: paramJson.Max}
//err3 := service.AskJson_MS("group_octopus01", "restful_jsoninfo", fmt.Sprintf("min=%d&max=%d", paramJson.Min, paramJson.Max), &userInfoRespBody)
err3 := service.AskJson_MS("group_octopus01", "restful_jsoninfo", ¶mJson_Req, &userInfoRespBody)
if err3 != nil {
log4go.ErrorLog(err3)
}
log4go.InfoLog("userInfoRespBody:", userInfoRespBody)
context.Return.Json(userInfoRespBody)
} else {
context.Return.Json(map[string]interface{}{
"err": "没有min和max参数。请按照此格式请求: http://[ip]:[port]/[webcontext]/restful/jsoninfo/{min}/{max} 。"})
}
}
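// JsonIndex reads the x1 and x2 environment variables and forwards the formatted result to the json_index view.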
func JsonIndex(context *web.Context) {
x1 := os.Getenv("x1")
x2 := os.Getenv("x2")
returnData := fmt.Sprintf("x1: %s , x2: %s .", x1, x2)
//context.Return.Forward("json_index", nil)
context.Return.Forward("json_index", returnData)
}
|
[
"\"x1\"",
"\"x2\""
] |
[] |
[
"x2",
"x1"
] |
[]
|
["x2", "x1"]
|
go
| 2 | 0 | |
build/zbi/verify_zbi_kernel_cmdline_test.py
|
#!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit test for verify_zbi_kernel_cmdline.py.
Requires the SCRUTINY and ZBI environment variables to be set.
To manually run this test:
SCRUTINY=~/fuchsia/out/default/host_x64/scrutiny \
ZBI=~/fuchsia/out/default/host_x64/zbi python3 \
verify_zbi_kernel_cmdline_test.py
"""
import os
import sys
import subprocess
import tempfile
import unittest
import unittest.mock as mock
import verify_zbi_kernel_cmdline
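# Helper: writes the golden file, packs the given cmdline into a test ZBI with the ZBI tool,
# then runs the verifier against it and returns its exit code.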
def verify_kernel_cmdline(golden, actual):
with tempfile.TemporaryDirectory() as test_folder:
golden_file = os.path.join(test_folder, 'golden')
stamp_file = os.path.join(test_folder, 'stamp')
fuchsia_folder = os.path.join(test_folder, 'fuchsia')
test_zbi = os.path.join(test_folder, 'test.zbi')
cmdline_file = os.path.join(test_folder, 'cmdline')
scrutiny = os.environ['SCRUTINY']
with open(golden_file, 'w+') as f:
f.write(golden)
with open(cmdline_file, 'wb+') as f:
f.write(actual)
# Use ZBI to create a test.zbi that only contains cmdline.
subprocess.check_call(
[os.environ['ZBI'], '-o', test_zbi, '-T', 'CMDLINE', cmdline_file])
os.mkdir(fuchsia_folder)
args = [
'--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
fuchsia_folder, '--kernel-cmdline-golden-file', golden_file, '--stamp',
stamp_file
]
# Verify the cmdline in the generated ZBI.
return verify_zbi_kernel_cmdline.main(args)
class RunVerifyZbiKernelCmdlineTest(unittest.TestCase):
    def test_verify_kernel_cmdline_success_normal_case(self):
self.assertEqual(
0,
verify_kernel_cmdline('key1=v1\nkey2=v2\nkey3=v3',
b'key1=v1 key2=v2 key3=v3'))
def test_verify_kernel_cmdline_success_order_diff(self):
self.assertEqual(
0,
verify_kernel_cmdline('key1=v1\nkey2=v2\nkey3=v3',
b'key2=v2 key1=v1 key3=v3'))
def test_verify_kernel_cmdline_success_no_value_option(self):
self.assertEqual(
0, verify_kernel_cmdline('option1\noption2', b'option1 option2'))
def test_verify_kernel_cmdline_fail_golden_empty(self):
self.assertEqual(-1, verify_kernel_cmdline('', b'key2=v2 key1=v1 key3=v3'))
def test_verify_kernel_cmdline_fail_missing_key2(self):
self.assertEqual(-1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1'))
def test_verify_kernel_cmdline_fail_key1_mismatch(self):
self.assertEqual(
-1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v2 key2=v2'))
def test_verify_kernel_cmdline_fail_key2_mismatch(self):
self.assertEqual(
-1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1 key2=v1'))
def test_verify_kernel_cmdline_fail_additional_key3(self):
self.assertEqual(
-1, verify_kernel_cmdline('key1=v1\nkey2=v2',
b'key1=v1 key2=v2 key3=v3'))
def test_verify_kernel_cmdline_fail_invalid_format(self):
self.assertEqual(
-1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'invalid=format=1'))
def test_verify_kernel_cmdline_fail_option1_missing(self):
self.assertEqual(-1, verify_kernel_cmdline('option1\noption2', b'option2'))
def test_verify_kernel_cmdline_fail_additional_option3(self):
self.assertEqual(
-1, verify_kernel_cmdline('option1\noption2',
b'option1 option2 option3'))
def test_verify_kernel_cmdline_zbi_not_found(self):
with tempfile.TemporaryDirectory() as test_folder:
golden_file = os.path.join(test_folder, 'golden')
stamp_file = os.path.join(test_folder, 'stamp')
fuchsia_folder = os.path.join(test_folder, 'fuchsia')
test_zbi = os.path.join(test_folder, 'test.zbi')
scrutiny = os.environ['SCRUTINY']
with open(golden_file, 'w+') as f:
f.write('option1')
# Do not create test_zbi
os.mkdir(fuchsia_folder)
args = [
'--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
fuchsia_folder, '--kernel-cmdline-golden-file', golden_file,
'--stamp', stamp_file
]
self.assertEqual(-1, verify_zbi_kernel_cmdline.main(args))
def test_verify_kernel_cmdline_success_no_cmdline_found(self):
with tempfile.TemporaryDirectory() as test_folder:
golden_file = os.path.join(test_folder, 'golden')
stamp_file = os.path.join(test_folder, 'stamp')
fuchsia_folder = os.path.join(test_folder, 'fuchsia')
test_zbi = os.path.join(test_folder, 'test.zbi')
scrutiny = os.environ['SCRUTINY']
# Create an empty golden file
with open(golden_file, 'w+') as f:
f.write('')
# Use ZBI to create a test.zbi with no cmdline.
subprocess.check_call([os.environ['ZBI'], '-o', test_zbi])
os.mkdir(fuchsia_folder)
args = [
'--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
fuchsia_folder, '--kernel-cmdline-golden-file', golden_file,
'--stamp', stamp_file
]
self.assertEqual(0, verify_zbi_kernel_cmdline.main(args))
def test_verify_kernel_cmdline_fail_golden_empty_cmdline_found(self):
self.assertEqual(-1, verify_kernel_cmdline('', b'option2'))
def test_verify_kernel_cmdline_fail_golden_not_empty_cmdline_not_found(self):
with tempfile.TemporaryDirectory() as test_folder:
golden_file = os.path.join(test_folder, 'golden')
stamp_file = os.path.join(test_folder, 'stamp')
fuchsia_folder = os.path.join(test_folder, 'fuchsia')
test_zbi = os.path.join(test_folder, 'test.zbi')
scrutiny = os.environ['SCRUTINY']
            # Create a golden file containing a single option
with open(golden_file, 'w+') as f:
f.write('option1')
# Use ZBI to create a test.zbi with no cmdline.
subprocess.check_call([os.environ['ZBI'], '-o', test_zbi])
os.mkdir(fuchsia_folder)
args = [
'--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
fuchsia_folder, '--kernel-cmdline-golden-file', golden_file,
'--stamp', stamp_file
]
self.assertEqual(-1, verify_zbi_kernel_cmdline.main(args))
if __name__ == '__main__':
if 'SCRUTINY' not in os.environ or 'ZBI' not in os.environ:
        print('Please set the SCRUTINY and ZBI environment variables')
sys.exit(-1)
unittest.main()
|
[] |
[] |
[
"SCRUTINY",
"ZBI"
] |
[]
|
["SCRUTINY", "ZBI"]
|
python
| 2 | 0 | |
nerdlandbot/commands/recipe.py
|
import typing
import discord
from discord.ext import commands
import gspread
import os
import asyncio
from datetime import datetime
from nerdlandbot.translations.Translations import get_text as translate
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.helpers.constants import NOTIFY_EMBED_COLOR
class Recipe(commands.Cog, name="Spreadsheets"):
def __init__(self, bot: commands.Bot):
self.sheets_token = os.getenv("SHEETS_JSON")
self.spreadsheet = os.getenv("SPREADSHEET")
self.bot = bot
@commands.command(name="add_recipe", aliases=["recipe"], brief="recipe_brief", help="recipe_help")
async def add_recipe(self, ctx: commands.Context):
        # Getting everything ready to access the spreadsheet
lang = await culture(ctx)
try:
gc = gspread.service_account(self.sheets_token)
sh = gc.open(self.spreadsheet)
except:
msg = translate("recipe_verification_error", lang)
return await ctx.send(msg)
ws = sh.sheet1
next_row = next_available_row(ws)
# Fetching date and formatting it
d_obj = datetime.now()
date_string = "{}/{}/{} {}:{}:{}".format(d_obj.month, d_obj.day, d_obj.year, d_obj.hour, d_obj.minute, d_obj.second)
# Initializing variables needed
embed_title = translate("recipe_title", lang)
questions = []
answers = []
# Asking the user questions and capturing the answer
for i in range(5):
questions.append(translate("recipe_template", lang).format(translate("recipe_{}_question".format(i+1), lang)))
embed = discord.Embed(
title = embed_title,
description = questions[i],
color = NOTIFY_EMBED_COLOR
)
await ctx.send(embed=embed)
try:
reaction = await ctx.bot.wait_for("message", timeout=30, check=check(ctx.author))
await asyncio.sleep(1)
answers.append(reaction)
# If the user wants to abort, he can enter '0'
if reaction.content == "0":
abort = translate("recipe_abort", lang)
embed = discord.Embed(
title = embed_title,
description = abort,
color = NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
# When the user is asked a rating, check if it's a number between 1 and 5
if i == 2:
if reaction.content.isnumeric():
reaction_int = int(reaction.content)
if reaction_int > 5 or reaction_int < 1:
rating_error = translate("recipe_rating_error", lang)
embed = discord.Embed(
title = embed_title,
description = rating_error,
color = NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
else:
int_error = translate("recipe_int_error", lang)
embed = discord.Embed(
title = embed_title,
description = int_error,
color = NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
except asyncio.TimeoutError:
timeout = translate("recipe_timeout", lang)
embed = discord.Embed(
title = embed_title,
description = timeout,
color = NOTIFY_EMBED_COLOR
)
return await ctx.send(embed=embed)
        # Let the user know all questions are answered and the recipe is being processed
processing = translate("recipe_processing", lang)
embed = discord.Embed(
title = embed_title,
description = processing,
color = NOTIFY_EMBED_COLOR
)
msg = await ctx.send(embed=embed)
# Updating the worksheet(ws) with all the data asked of the user
ws.update("A{}".format(next_row), date_string)
ws.format("A{}".format(next_row), {"horizontalAlignment": "RIGHT"})
ws.update("B{}".format(next_row), answers[0].content)
ws.format("B{}".format(next_row), {"textFormat": {"bold": True}})
ws.update("C{}".format(next_row), answers[1].content)
ws.update("D{}".format(next_row), int(answers[2].content))
ws.update("E{}".format(next_row), answers[3].content)
ws.update("F{}".format(next_row), answers[4].content)
        # After a 5 second delay (Google Sheets isn't instant), check whether the last cell was added
await asyncio.sleep(5)
if not ws.acell("F{}".format(next_row)).value:
error_msg = translate("recipe_error", lang)
embed = discord.Embed(
title = embed_title,
description = error_msg,
color = NOTIFY_EMBED_COLOR
)
await ctx.send(embed=embed)
else:
succes_msg = translate("recipe_succes", lang)
embed = discord.Embed(
title = embed_title,
description = succes_msg,
color = NOTIFY_EMBED_COLOR
)
await ctx.send(embed=embed)
# Removing the processing message
return await msg.delete()
def next_available_row(worksheet) -> str:
"""
Returning the next available row
:param worksheet: The worksheet you're working on
    :return: The index of the next available row
"""
str_list = list(filter(None, worksheet.col_values(1)))
return str(len(str_list)+1)
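# Returns a predicate for ctx.bot.wait_for that only accepts messages from the given author.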
def check(author):
def inner_check(message):
return message.author == author
return inner_check
def setup(bot: commands.Bot):
bot.add_cog(Recipe(bot))
|
[] |
[] |
[
"SPREADSHEET",
"SHEETS_JSON"
] |
[]
|
["SPREADSHEET", "SHEETS_JSON"]
|
python
| 2 | 0 | |
sdk/servicebus/azure-servicebus/samples/async_samples/receive_deadlettered_messages_async.py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show receiving dead-lettered messages from a Service Bus Queue asynchronously.
"""
# pylint: disable=C0111
import os
import asyncio
from azure.servicebus import Message
from azure.servicebus.aio import ServiceBusClient
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
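# Sends a batch of messages, dead-letters them, then receives and completes them from the dead-letter queue.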
async def main():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
async with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
messages = [Message("Message to be deadlettered") for _ in range(10)]
async with sender:
await sender.send_messages(messages)
print('dead lettering messages')
receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME)
async with receiver:
received_msgs = await receiver.receive_messages(max_batch_size=10, max_wait_time=5)
for msg in received_msgs:
print(str(msg))
await msg.dead_letter()
print('receiving deadlettered messages')
dlq_receiver = servicebus_client.get_queue_deadletter_receiver(queue_name=QUEUE_NAME, prefetch=10)
async with dlq_receiver:
received_msgs = await dlq_receiver.receive_messages(max_batch_size=10, max_wait_time=5)
for msg in received_msgs:
print(str(msg))
await msg.complete()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
[] |
[] |
[
"SERVICE_BUS_CONNECTION_STR",
"SERVICE_BUS_QUEUE_NAME"
] |
[]
|
["SERVICE_BUS_CONNECTION_STR", "SERVICE_BUS_QUEUE_NAME"]
|
python
| 2 | 0 | |
vendor/github.com/docker/docker/rootless/rootless.go
|
package rootless
import (
"os"
"sync"
)
var (
runningWithNonRootUsername bool
runningWithNonRootUsernameOnce sync.Once
)
// RunningWithNonRootUsername returns true if $USER is set to a non-root value,
// regardless to the UID/EUID value.
//
// The value of this variable is mostly used for configuring default paths.
// If the value is true, $HOME and $XDG_RUNTIME_DIR should be honored for setting up the default paths.
// If false (not only EUID==0 but also $USER==root), $HOME and $XDG_RUNTIME_DIR should be ignored
// even if we are in a user namespace.
func RunningWithNonRootUsername() bool {
runningWithNonRootUsernameOnce.Do(func() {
u := os.Getenv("USER")
runningWithNonRootUsername = u != "" && u != "root"
})
return runningWithNonRootUsername
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
examples/service/taskrouter/activity/create/activity_create_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/taskrouter/v1"
"github.com/RJPearson94/twilio-sdk-go/service/taskrouter/v1/workspace/activities"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
"github.com/RJPearson94/twilio-sdk-go/utils"
)
var taskrouterClient *v1.TaskRouter
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
taskrouterClient = twilio.NewWithCredentials(creds).TaskRouter.V1
}
func main() {
resp, err := taskrouterClient.
Workspace("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Activities.
Create(&activities.CreateActivityInput{
FriendlyName: "NewActivity",
Available: utils.Bool(true),
})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
modulestf/upload.py
|
import os
import uuid
from hashlib import md5
import boto3
from modulestf.const import *
from modulestf.logger import setup_logging
logger = setup_logging()
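# Uploads the file to S3 under a randomised <s3_dir>/<hash>.zip key with a public-read ACL and returns the public URL.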
def upload_file_to_s3(filename):
s3_bucket = os.environ.get("S3_BUCKET", S3_BUCKET)
s3_dir = os.environ.get("S3_DIR", "local")
zip_filename = md5(bytes(uuid.uuid4().hex, "ascii")).hexdigest() + ".zip"
s3 = boto3.client("s3", region_name=S3_BUCKET_REGION)
s3_key = s3_dir + "/" + zip_filename
s3.upload_file(filename, s3_bucket, s3_key, ExtraArgs=dict(
ACL="public-read",
ContentType="application/zip",
StorageClass="ONEZONE_IA"
))
link = "https://" + s3_bucket + "/" + s3_key
logger.info("LINK=" + link)
return link
|
[] |
[] |
[
"S3_BUCKET",
"S3_DIR"
] |
[]
|
["S3_BUCKET", "S3_DIR"]
|
python
| 2 | 0 | |
manage.py
|
import os
import unittest
import coverage
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from app import blueprint
from app.main import create_app, db
# Models
from app.main.model import user, folder
app = create_app(os.getenv('BOILERPLATE_ENV') or 'dev')
app.register_blueprint(blueprint)
app.app_context().push()
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def run():
app.run()
@manager.command
def test():
"""Runs the unit tests."""
tests = unittest.TestLoader().discover('app/test', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def run_coverage():
"""Runs the unit tests with Coverage Report"""
cov = coverage.Coverage()
cov.start()
val = 1
tests = unittest.TestLoader().discover('app/test', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=0).run(tests)
if result.wasSuccessful():
val = 0
cov.stop()
cov.save()
cov.html_report()
cov.report()
return val
if __name__ == '__main__':
manager.run()
|
[] |
[] |
[
"BOILERPLATE_ENV"
] |
[]
|
["BOILERPLATE_ENV"]
|
python
| 1 | 0 | |
trolley.py
|
#!/usr/bin/env python
"""
Trolley syncs issues between CSV, Github, and Buffer with Trello.
"""
import csv
import datetime
import os
import random
import click
import click_config
import github3
from buffpy.api import API as BufferAPI
from buffpy.managers.profiles import Profiles
from buffpy.managers.updates import Updates
from trello import TrelloClient
__author__ = 'Jeff Triplett'
__copyright__ = 'Copyright 2015, Jeff Triplett'
__license__ = 'BSD'
__version__ = '0.1.6'
# hold auth state
_buffer_auth = None
_github_auth = None
_trello_auth = None
BUFFER_CLIENT_ID = os.environ.get('BUFFER_CLIENT_ID')
BUFFER_CLIENT_SECRET = os.environ.get('BUFFER_CLIENT_SECRET')
BUFFER_ACCESS_TOKEN = os.environ.get('BUFFER_ACCESS_TOKEN')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')
GITHUB_ORG = os.environ.get('GITHUB_ORG')
GITHUB_REPO = os.environ.get('GITHUB_REPO')
GITHUB_SCOPES = ['user', 'repo']
TRELLO_APP_KEY = os.environ.get('TRELLO_APP_KEY')
TRELLO_APP_SECRET = os.environ.get('TRELLO_APP_SECRET')
TRELLO_AUTH_TOKEN = os.environ.get('TRELLO_AUTH_TOKEN')
TRELLO_BOARD_ID = os.environ.get('TRELLO_BOARD_ID')
TRELLO_DEFAULT_LIST = os.environ.get('TRELLO_DEFAULT_LIST', 'Uncategorized')
# might migrate to:
# http://click.pocoo.org/4/options/#values-from-environment-variables
class config(object):
class buffer(object):
client_id = BUFFER_CLIENT_ID
client_secret = BUFFER_CLIENT_SECRET
access_token = BUFFER_ACCESS_TOKEN
class github(object):
username = GITHUB_USERNAME
password = GITHUB_PASSWORD
org = GITHUB_ORG
repo = GITHUB_REPO
class trello(object):
app_key = TRELLO_APP_KEY
app_secret = TRELLO_APP_SECRET
auth_token = TRELLO_AUTH_TOKEN
board_id = TRELLO_BOARD_ID
default_list = TRELLO_DEFAULT_LIST
# utils
def csv_to_dict_list(filename):
"""Open a CSV file and return a list of dict objects."""
with open(filename) as f:
values = list(csv.DictReader(f))
return values
def get_random_color():
filename = 'etc/color-blind-safe.csv'
colors = csv_to_dict_list(filename)
    index = random.randint(0, len(colors) - 1)
return colors[index]['color']
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version {}'.format(__version__))
ctx.exit()
# github utils
def get_github_auth(github_config):
"""Log me into github and return an object."""
global _github_auth
if _github_auth:
return _github_auth
assert github_config.username
assert github_config.password
_github_auth = github3.login(
github_config.username,
github_config.password)
return _github_auth
def get_github_repository(config, github_org, github_repo):
"""Return a repository object and log me in."""
github = get_github_auth(config.github)
repository = github.repository(github_org, github_repo)
return repository
def get_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_issues = [str(item.title) for item in repository.iter_issues()]
return existing_issues
def get_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_labels = [str(item.name) for item in repository.iter_labels()]
return existing_labels
def get_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = [str(item.title) for item in repository.iter_milestones()]
return existing_milestones
# github core
def close_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
issues = [str(issue.title) for issue in repository.iter_issues()]
click.echo('closing {} issues'.format(len(issues)))
for issue in repository.iter_issues():
click.echo('closing issue "{}"'.format(issue.title))
issue.close()
def create_github_issues(config, github_org, github_repo,
filename='etc/default_github_issues.csv'):
issues = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_issues = get_existing_github_issues(config, github_org, github_repo)
click.echo('creating {} issues'.format(len(issues)))
for issue in issues:
title = str(issue['title'])
body = str(issue['body'])
labels = issue['labels']
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if title not in existing_issues:
click.echo('creating issue "{}"'.format(title))
repository.create_issue(title, body, labels=labels)
else:
click.echo('issue "{}" already exists'.format(title))
def create_github_labels(config, github_org, github_repo,
filename='etc/default_github_labels.csv'):
labels = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_labels = get_existing_github_labels(config, github_org, github_repo)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_github_milestones(config, github_org, github_repo,
filename='etc/default_github_milestones.csv'):
milestones = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = get_existing_github_milestones(config, github_org, github_repo)
click.echo('creating {} milestones'.format(len(milestones)))
for milestone in milestones:
title = str(milestone['title'])
if title not in existing_milestones:
click.echo('creating milestone "{}"'.format(title))
repository.create_milestone(title)
else:
click.echo('milestone "{}" already exists'.format(title))
def delete_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
labels = [str(label.name) for label in repository.iter_labels()]
click.echo('removing {} labels'.format(len(labels)))
for label in labels:
click.echo('removing label "{}"'.format(label))
repository.label(label).delete()
def delete_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
milestones = repository.iter_milestones(github_org, github_repo)
click.echo('removing {} milestones'.format(len(list(milestones))))
for milestone in milestones:
click.echo('removing milestone "{}"'.format(milestone.title))
milestone.delete()
# trello utils
def get_trello_auth(trello_config):
"""Log me into trello and return an object."""
global _trello_auth
if _trello_auth:
return _trello_auth
assert trello_config.app_key
assert trello_config.app_secret
assert trello_config.auth_token
_trello_auth = TrelloClient(
api_key=trello_config.app_key,
api_secret=trello_config.app_secret,
token=trello_config.auth_token,
# token_secret=str(trello_config.auth_token),
)
return _trello_auth
def get_existing_trello_boards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
boards = [str(board.name) for board in board.get_cards()]
return boards
def get_existing_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
cards = board.get_cards()
cards = [str(card.name) for card in cards]
return cards
def get_existing_trello_labels(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
labels = board.get_labels()
labels = [label for label in labels]
return labels
def get_existing_trello_lists(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
all_lists = [item.name for item in all_lists]
return all_lists
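# Builds a two-way name <-> id lookup of the board's lists, creating the configured default list when it is missing.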
def get_trello_list_lookup(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
list_lookup = {}
for item in all_lists:
id = item.id
name = item.name
list_lookup[name] = id
list_lookup[id] = name
default_list = config.trello.default_list
if default_list not in list_lookup:
new_list = board.add_list(default_list)
new_list_id = new_list.id
list_lookup[default_list] = new_list_id
list_lookup[new_list_id] = default_list
return list_lookup
# trello core
def create_trello_cards(config, trello_board_id,
filename='etc/default_trello_cards.csv'):
cards = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_cards = get_existing_trello_cards(config, trello_board_id)
board_lookup = get_trello_list_lookup(config, trello_board_id)
category = board_lookup[config.trello.default_list]
board = trello.get_board(trello_board_id)
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = str(card.get('title', ''))
description = str(card.get('body', ''))
labels = card.get('labels', [])
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if name not in existing_cards:
click.echo('creating issue "{}"'.format(name))
list_item = board.get_list(category)
new_card = list_item.add_card(name, description, labels=labels)
'''
# currently labels are broken in the trello python client :/
if len(labels):
for label in labels:
trello.cards.new_label(new_card['id'], label)
'''
else:
click.echo('issue "{}" already exists'.format(name))
def create_trello_labels(config, trello_board_id,
filename='etc/default_trello_labels.csv'):
labels = csv_to_dict_list(filename)
existing_labels = get_existing_trello_labels(config, trello_board_id)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
# TODO: Create Trello label via API
#repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_trello_lists(config, trello_board_id,
filename='etc/default_trello_lists.csv'):
lists = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_lists = get_existing_trello_lists(config, trello_board_id)
click.echo('creating {} lists'.format(len(lists)))
for item in lists:
title = str(item['title'])
if title not in existing_lists:
click.echo('creating list "{}"'.format(title))
trello.boards.new_list(trello_board_id, title)
else:
click.echo('list "{}" already exists'.format(title))
def list_trello_boards(config):
trello = get_trello_auth(config.trello)
boards = trello.list_boards()
for board in boards:
click.echo('{0}: {1}{2}'.format(
board.id,
board.name,
' (closed)' if board.closed else ''
))
def list_trello_organizations(config):
trello = get_trello_auth(config.trello)
organizations = trello.list_organizations()
for organization in organizations:
click.echo('{0}: {1}'.format(
organization.id,
organization.name
))
# sync github and trello
def sync_github_issues_to_trello_cards(config, github_org, github_repo,
trello_board_id):
trello = get_trello_auth(config.trello)
board_lookup = get_trello_list_lookup(config, trello_board_id)
existing_trello_cards = get_existing_trello_cards(config, trello_board_id)
repository = get_github_repository(config, github_org, github_repo)
issues = repository.iter_issues()
#click.echo('creating {} issues'.format(issues.count))
for issue in issues:
title = issue.title
desc = issue.body
category = board_lookup[config.trello.default_list]
if title not in existing_trello_cards:
click.echo('creating issue "{}"'.format(title))
trello.cards.new(title, category, desc=desc)
else:
click.echo('issue "{}" already exists'.format(title))
def sync_trello_cards_to_github_issues(config, trello_board_id, github_org, github_repo):
trello = get_trello_auth(config.trello)
existing_github_issues = get_existing_github_issues(config, github_org, github_repo)
repository = get_github_repository(config, github_org, github_repo)
board = trello.get_board(trello_board_id)
cards = board.all_cards()
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = card.name
# id = card['id']
# list_id = card['idList']
description = card.description
labels = card.labels
if name not in existing_github_issues:
click.echo('creating card "{}"'.format(name))
repository.create_issue(name, description, labels=labels)
else:
click.echo('card "{}" already exists'.format(name))
def list_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(config.trello.board_id)
cards = [card for card in board.open_cards()]
for card in cards:
name = card.name
card_id = card.id
description = card.description
click.echo('{0}: {1}'.format(card_id, name))
if len(description):
click.echo(description)
def get_buffer_auth(buffer_config):
"""Log me into buffer and return an object."""
global _buffer_auth
if _buffer_auth:
return _buffer_auth
assert buffer_config.client_id
assert buffer_config.client_secret
assert buffer_config.access_token
_buffer_auth = BufferAPI(
client_id=buffer_config.client_id,
client_secret=buffer_config.client_secret,
access_token=buffer_config.access_token,
)
return _buffer_auth
def test_buffer(config):
client = get_buffer_auth(config.buffer)
profiles = Profiles(api=client).filter(service='twitter')
if not len(profiles):
raise Exception('Your twitter account is not configured')
profile = profiles[0]
print profile
print
pending = profile.updates.pending
for item in pending:
print item
print item.id
print item.text
print item.scheduled_at
print datetime.datetime.fromtimestamp(item.scheduled_at)
# cli methods we are exposing to be used via terminal
@click.group()
@click_config.wrap(module=config, sections=('github', 'trello'))
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True)
def cli():
assert config.buffer
pass
@cli.command('bootstrap')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_bootstrap(github_org, github_repo):
"""Sets up github with some sensible defaults."""
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('close_existing_github_issues')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_close_existing_github_issues(force, github_org, github_repo):
"""Close all existing GitHub issues."""
message = 'Do you really want to close all of your existing GitHub issues?'
if force or click.confirm(message):
close_existing_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('create_github_issues')
@click.option('--filename', default='etc/default_github_issues.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_issues(filename, github_org, github_repo):
"""Create GitHub issues from a CSV file."""
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_labels')
@click.option('--filename', default='etc/default_github_labels.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_labels(filename, github_org, github_repo):
"""Create GitHub labels from a CSV file."""
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_milestones')
@click.option('--filename', default='etc/default_github_milestones.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_milestones(filename, github_org, github_repo):
"""Create GitHub milestones from a CSV file."""
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_trello_cards')
@click.option('--filename', default='etc/default_trello_cards.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_cards(filename, trello_board):
"""Create Trello cards from a CSV file."""
create_trello_cards(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_labels')
@click.option('--filename', default='etc/default_trello_labels.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_labels(filename, trello_board):
"""Create Trello labels from a CSV file."""
create_trello_labels(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_lists')
@click.option('--filename', default='etc/default_trello_lists.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_lists(filename, trello_board):
"""Create Trello lists from a CSV file."""
create_trello_lists(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('delete_existing_github_labels')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_labels(force, github_org, github_repo):
"""Delete labels from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub labels?'
if force or click.confirm(message):
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('delete_existing_github_milestones')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_milestones(force, github_org, github_repo):
"""Delete milestones from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub milestones?'
if force or click.confirm(message):
delete_existing_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('sync_github_issues_to_trello_cards')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
@click.option('--trello-board', type=str)
def cli_sync_github_issues_to_trello_cards(github_org, github_repo, trello_board):
"""Convert your GitHub issues to Trello cards."""
sync_github_issues_to_trello_cards(
config,
github_org or config.github.org,
github_repo or config.github.repo,
trello_board or config.trello.board_id)
@cli.command('sync_trello_cards_to_github_issues')
@click.option('--trello-board', type=str)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_sync_trello_cards_to_github_issues(trello_board, github_org, github_repo):
"""Convert your Trello cards to GitHub issues."""
sync_trello_cards_to_github_issues(
config,
trello_board or config.trello.board_id,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('list_trello_boards')
def cli_list_trello_boards():
"""List your Trello boards."""
list_trello_boards(config)
@cli.command('list_trello_cards')
@click.option('--trello-board', type=str)
def cli_list_trello_cards(trello_board):
"""List your Trello cards for a given board."""
list_trello_cards(
config,
trello_board or config.trello.board_id)
@cli.command('list_trello_organizations')
def cli_list_trello_organizations():
"""List your Trello organizations."""
list_trello_organizations(config)
@cli.command('test_buffer')
def cli_test_buffer():
"""Convert your Trello cards to GitHub issues."""
try:
test_buffer(config)
except Exception as e:
print e
if __name__ == '__main__':
cli()
|
[] |
[] |
[
"GITHUB_USERNAME",
"GITHUB_ORG",
"TRELLO_BOARD_ID",
"TRELLO_DEFAULT_LIST",
"GITHUB_PASSWORD",
"BUFFER_ACCESS_TOKEN",
"BUFFER_CLIENT_SECRET",
"TRELLO_AUTH_TOKEN",
"TRELLO_APP_KEY",
"TRELLO_APP_SECRET",
"BUFFER_CLIENT_ID",
"GITHUB_REPO"
] |
[]
|
["GITHUB_USERNAME", "GITHUB_ORG", "TRELLO_BOARD_ID", "TRELLO_DEFAULT_LIST", "GITHUB_PASSWORD", "BUFFER_ACCESS_TOKEN", "BUFFER_CLIENT_SECRET", "TRELLO_AUTH_TOKEN", "TRELLO_APP_KEY", "TRELLO_APP_SECRET", "BUFFER_CLIENT_ID", "GITHUB_REPO"]
|
python
| 12 | 0 | |
config.py
|
# config.py
import os
class BaseConfig(object):
CACHE_TYPE = os.environ['CACHE_TYPE']
CACHE_REDIS_HOST = os.environ['CACHE_REDIS_HOST']
CACHE_REDIS_PORT = os.environ['CACHE_REDIS_PORT']
CACHE_REDIS_DB = os.environ['CACHE_REDIS_DB']
CACHE_REDIS_URL = os.environ['CACHE_REDIS_URL']
CACHE_DEFAULT_TIMEOUT = os.environ['CACHE_DEFAULT_TIMEOUT']
|
[] |
[] |
[
"CACHE_REDIS_DB",
"CACHE_TYPE",
"CACHE_REDIS_PORT",
"CACHE_REDIS_HOST",
"CACHE_REDIS_URL",
"CACHE_DEFAULT_TIMEOUT"
] |
[]
|
["CACHE_REDIS_DB", "CACHE_TYPE", "CACHE_REDIS_PORT", "CACHE_REDIS_HOST", "CACHE_REDIS_URL", "CACHE_DEFAULT_TIMEOUT"]
|
python
| 6 | 0 | |
rtm/http.go
|
package rtm
import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"sort"
"strings"
)
type HTTP struct {
Token string
ApiKey string
BaseURL string
}
type ErrorResponse struct {
Msg string
Code string
}
func (err *ErrorResponse) Error() string {
return fmt.Sprintf("%s (code=%s)", err.Msg, err.Code)
}
const baseURL = "https://api.rememberthemilk.com/services/rest"
const authURL = "https://api.rememberthemilk.com/services/auth"
func (c *HTTP) GetAuthURL(frob string, perms []string) string {
query := map[string]string{}
query["api_key"] = c.ApiKey
query["perms"] = strings.Join(perms, ",")
query["frob"] = frob
sig := signParams(query)
m := url.Values{}
for k, v := range query {
m.Add(k, v)
}
m.Add("api_sig", sig)
return authURL + "?" + m.Encode()
}
func (c *HTTP) VerifyResponse(err error, stat string, resp ErrorResponse) error {
if err != nil {
return err
}
if stat == "fail" {
return &ErrorResponse{Msg: resp.Msg, Code: resp.Code}
}
return nil
}
func (c *HTTP) Request(method string, params map[string]string, result interface{}) error {
resp, err := c.doRequest(method, params, c.Token, c.ApiKey)
if err != nil {
fmt.Println(err)
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return err
}
if result != nil {
if err := json.Unmarshal(body, &result); err != nil {
log.Fatal(err)
return err
}
}
//TODO: support debug mode
//fmt.Println(string(body))
return nil
}
func (c *HTTP) doRequest(method string, params map[string]string, token string, apiKey string) (*http.Response, error) {
baseParams := map[string]string{
"api_key": apiKey,
"auth_token": token,
"format": "json",
"method": method,
}
apiParams := mergeParams(baseParams, params)
signature := signParams(apiParams)
apiParams["api_sig"] = signature
url := c.createURL(apiParams)
return http.Get(url)
}
func (c *HTTP) createURL(params map[string]string) string {
m := url.Values{}
for k, v := range params {
m.Add(k, v)
}
return c.BaseURL + "/services/rest?" + m.Encode()
}
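// signParams implements the Remember The Milk API signing: parameter names are sorted, each key is
// concatenated with its value, the RTM_SHARED_SECRET value is prepended, and the MD5 hex digest of
// the resulting string is returned.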
func signParams(params map[string]string) string {
sharedSecret := os.Getenv("RTM_SHARED_SECRET")
// Extract keys and sort them
keys := make([]string, 0)
for k := range params {
keys = append(keys, k)
}
sort.Sort(sort.StringSlice(keys))
// Concatenate all keys and values
items := make([]string, 0)
for _, k := range keys {
item := k + params[k]
items = append(items, item)
}
text := sharedSecret + strings.Join(items, "")
return getMD5(text)
}
func getMD5(text string) string {
hasher := md5.New()
hasher.Write([]byte(text))
return hex.EncodeToString(hasher.Sum(nil))
}
func mergeParams(map1 map[string]string, map2 map[string]string) map[string]string {
m := make(map[string]string, len(map1)+len(map2))
for k, v := range map1 {
m[k] = v
}
for k, v := range map2 {
m[k] = v
}
return m
}
|
[
"\"RTM_SHARED_SECRET\""
] |
[] |
[
"RTM_SHARED_SECRET"
] |
[]
|
["RTM_SHARED_SECRET"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/json"
"errors"
"log"
"time"
"net/http"
"os"
"strconv"
ld "gopkg.in/launchdarkly/go-server-sdk.v4"
"gopkg.in/launchdarkly/go-sdk-common.v1/ldvalue"
"github.com/gorilla/mux"
)
var client *ld.LDClient
type UserRequest struct {
User ld.User `json:"user"`
}
type CustomEventRequest struct {
User ld.User `json:"user"`
Key string `json:"key"`
Data ldvalue.Value `json:"data"`
MetricValue *float64 `json:"metricValue"`
}
type RootResponse struct {
Initialized bool `json:"initialized"`
}
type EvalFeatureRequest struct {
User ld.User `json:"user"`
DefaultValue ldvalue.Value `json:"defaultValue"`
Detail bool `json:"detail"`
}
type EvalFeatureResponse struct {
Key string `json:"key"`
Result ldvalue.Value `json:"result"`
VariationIndex *int `json:"variationIndex,omitempty"`
Reason ld.EvaluationReason `json:"reason,omitempty"`
}
func getRootHandler(w http.ResponseWriter, r *http.Request) {
responseBody := RootResponse{
Initialized: client.Initialized(),
}
responseEncoded, err := json.Marshal(responseBody)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(responseEncoded)
}
func postFlushHandler(w http.ResponseWriter, req *http.Request) {
client.Flush()
w.WriteHeader(http.StatusNoContent)
}
func postEventHandler(w http.ResponseWriter, req *http.Request) {
if req.Body == nil {
http.Error(w, "expected a body", http.StatusBadRequest)
return
}
var params CustomEventRequest
err := json.NewDecoder(req.Body).Decode(¶ms)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if params.MetricValue == nil {
err = client.Track(params.Key, params.User, params.Data)
} else {
err = client.TrackWithMetric(params.Key, params.User, params.Data, *params.MetricValue)
}
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusNoContent)
}
func postIdentifyHandler(w http.ResponseWriter, req *http.Request) {
if req.Body == nil {
http.Error(w, "expected a body", http.StatusBadRequest)
return
}
var params UserRequest
err := json.NewDecoder(req.Body).Decode(¶ms)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = client.Identify(params.User)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusNoContent)
}
func postAllFlagsHandler(w http.ResponseWriter, req *http.Request) {
if req.Body == nil {
http.Error(w, "expected a body", http.StatusBadRequest)
return
}
var params UserRequest
err := json.NewDecoder(req.Body).Decode(¶ms)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
allFlags := client.AllFlags(params.User)
allFlagsEncoded, err := json.Marshal(allFlags)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(allFlagsEncoded)
}
func PostVariationHandler(w http.ResponseWriter, req *http.Request) {
key := mux.Vars(req)["key"]
if req.Body == nil {
http.Error(w, "expected a body", http.StatusBadRequest)
return
}
var params EvalFeatureRequest
err := json.NewDecoder(req.Body).Decode(¶ms)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
response := EvalFeatureResponse{Key: key}
if params.Detail {
value, detail, _ := client.JSONVariationDetail(key, params.User, params.DefaultValue)
response.Result = value
response.VariationIndex = detail.VariationIndex
response.Reason = detail.Reason
} else {
value, _ := client.JSONVariation(key, params.User, params.DefaultValue)
response.Result = value
}
encoded, err := json.Marshal(response)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(encoded)
}
type Config struct {
port uint16
key string
}
func makeDefaultConfig() Config {
return Config{
port: 8080,
key: "",
}
}
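// loadConfigFromEnvironment overrides the defaults with the required SDK_KEY and optional PORT environment variables.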
func loadConfigFromEnvironment(config *Config) error {
key := os.Getenv("SDK_KEY")
if key == "" {
return errors.New("SDK_KEY is required")
} else {
config.key = key
}
port := os.Getenv("PORT")
if port != "" {
if x, err := strconv.ParseUint(port, 10, 16); err == nil {
config.port = uint16(x)
} else {
return err
}
}
return nil
}
func main() {
config := makeDefaultConfig()
err := loadConfigFromEnvironment(&config)
if err != nil {
log.Fatal(err)
return
}
client, _ = ld.MakeClient(config.key, 5 * time.Second)
defer client.Close()
router := mux.NewRouter()
router.HandleFunc("/", getRootHandler).Methods("GET")
router.HandleFunc("/track", postEventHandler).Methods("POST")
router.HandleFunc("/flush", postFlushHandler).Methods("POST")
router.HandleFunc("/identify", postIdentifyHandler).Methods("POST")
router.HandleFunc("/allFlags", postAllFlagsHandler).Methods("POST")
router.HandleFunc("/feature/{key}/eval", PostVariationHandler).Methods("POST")
http.Handle("/", router)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(int(config.port)), nil))
}
|
[
"\"SDK_KEY\"",
"\"PORT\""
] |
[] |
[
"PORT",
"SDK_KEY"
] |
[]
|
["PORT", "SDK_KEY"]
|
go
| 2 | 0 | |
docker/pkg/archive/example_changes.go
|
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/ory/dockertest/v3/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
	if len(*flOldDir) == 0 {
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
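// prepareUntarSourceDirectory fills targetPath with numberOfFiles small files (plus a hardlink per
// file when makeLinks is set) and returns the total payload size in bytes.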
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
lib/dpdk-stable-19.08.2/usertools/dpdk-pmdinfo.py
|
#!/usr/bin/env python
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2016 Neil Horman <[email protected]>
# -------------------------------------------------------------------------
#
# Utility to dump PMD_INFO_STRING support from an object file
#
# -------------------------------------------------------------------------
from __future__ import print_function
import json
import os
import platform
import string
import sys
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import (byte2int, bytes2str, str2bytes)
from elftools.elf.elffile import ELFFile
from optparse import OptionParser
# For running from development directory. It should take precedence over the
# installed pyelftools.
sys.path.insert(0, '.')
raw_output = False
pcidb = None
# ===========================================
class Vendor:
"""
Class for vendors. This is the top level class
    for the devices belonging to a specific vendor.
self.devices is the device dictionary
subdevices are in each device.
"""
def __init__(self, vendorStr):
"""
Class initializes with the raw line from pci.ids
Parsing takes place inside __init__
"""
self.ID = vendorStr.split()[0]
self.name = vendorStr.replace("%s " % self.ID, "").rstrip()
self.devices = {}
def addDevice(self, deviceStr):
"""
Adds a device to self.devices
takes the raw line from pci.ids
"""
s = deviceStr.strip()
devID = s.split()[0]
if devID in self.devices:
pass
else:
self.devices[devID] = Device(deviceStr)
def report(self):
print(self.ID, self.name)
for id, dev in self.devices.items():
dev.report()
def find_device(self, devid):
# convert to a hex string and remove 0x
devid = hex(devid)[2:]
try:
return self.devices[devid]
except:
return Device("%s Unknown Device" % devid)
class Device:
def __init__(self, deviceStr):
"""
Class for each device.
Each vendor has its own devices dictionary.
"""
s = deviceStr.strip()
self.ID = s.split()[0]
self.name = s.replace("%s " % self.ID, "")
self.subdevices = {}
def report(self):
print("\t%s\t%s" % (self.ID, self.name))
for subID, subdev in self.subdevices.items():
subdev.report()
def addSubDevice(self, subDeviceStr):
"""
Adds a subvendor, subdevice to device.
Uses raw line from pci.ids
"""
s = subDeviceStr.strip()
spl = s.split()
subVendorID = spl[0]
subDeviceID = spl[1]
subDeviceName = s.split(" ")[-1]
devID = "%s:%s" % (subVendorID, subDeviceID)
self.subdevices[devID] = SubDevice(
subVendorID, subDeviceID, subDeviceName)
def find_subid(self, subven, subdev):
subven = hex(subven)[2:]
subdev = hex(subdev)[2:]
devid = "%s:%s" % (subven, subdev)
try:
return self.subdevices[devid]
except:
if (subven == "ffff" and subdev == "ffff"):
return SubDevice("ffff", "ffff", "(All Subdevices)")
else:
return SubDevice(subven, subdev, "(Unknown Subdevice)")
class SubDevice:
"""
Class for subdevices.
"""
def __init__(self, vendor, device, name):
"""
Class initializes with vendorid, deviceid and name
"""
self.vendorID = vendor
self.deviceID = device
self.name = name
def report(self):
print("\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID, self.name))
class PCIIds:
"""
Top class for all pci.ids entries.
All queries will be asked to this class.
PCIIds.vendors["0e11"].devices["0046"].\
subdevices["0e11:4091"].name = "Smart Array 6i"
"""
def __init__(self, filename):
"""
Prepares the directories.
Checks local data file.
Tries to load from local, if not found, downloads from web
"""
self.version = ""
self.date = ""
self.vendors = {}
self.contents = None
self.readLocal(filename)
self.parse()
def reportVendors(self):
"""Reports the vendors
"""
for vid, v in self.vendors.items():
print(v.ID, v.name)
def report(self, vendor=None):
"""
Reports everything for all vendors or a specific vendor
PCIIds.report() reports everything
PCIIDs.report("0e11") reports only "Compaq Computer Corporation"
"""
if vendor is not None:
self.vendors[vendor].report()
else:
for vID, v in self.vendors.items():
v.report()
def find_vendor(self, vid):
# convert vid to a hex string and remove the 0x
vid = hex(vid)[2:]
try:
return self.vendors[vid]
except:
return Vendor("%s Unknown Vendor" % (vid))
def findDate(self, content):
for l in content:
if l.find("Date:") > -1:
return l.split()[-2].replace("-", "")
return None
def parse(self):
if len(self.contents) < 1:
print("data/%s-pci.ids not found" % self.date)
else:
vendorID = ""
deviceID = ""
for l in self.contents:
if l[0] == "#":
continue
elif len(l.strip()) == 0:
continue
else:
if l.find("\t\t") == 0:
self.vendors[vendorID].devices[
deviceID].addSubDevice(l)
elif l.find("\t") == 0:
deviceID = l.strip().split()[0]
self.vendors[vendorID].addDevice(l)
else:
vendorID = l.split()[0]
self.vendors[vendorID] = Vendor(l)
def readLocal(self, filename):
"""
Reads the local file
"""
self.contents = open(filename).readlines()
self.date = self.findDate(self.contents)
def loadLocal(self):
"""
Loads database from local. If there is no file,
it creates a new one from web
"""
self.date = idsfile[0].split("/")[1].split("-")[0]
self.readLocal()
# =======================================
def search_file(filename, search_path):
""" Given a search path, find file with requested name """
for path in string.split(search_path, ":"):
candidate = os.path.join(path, filename)
if os.path.exists(candidate):
return os.path.abspath(candidate)
return None
class ReadElf(object):
""" display_* methods are used to emit output into the output stream
"""
def __init__(self, file, output):
""" file:
stream object with the ELF file to read
output:
output stream to write to
"""
self.elffile = ELFFile(file)
self.output = output
# Lazily initialized if a debug dump is requested
self._dwarfinfo = None
self._versioninfo = None
def _section_from_spec(self, spec):
""" Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
"""
try:
num = int(spec)
if num < self.elffile.num_sections():
return self.elffile.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elffile.get_section_by_name(str2bytes(spec))
def pretty_print_pmdinfo(self, pmdinfo):
global pcidb
for i in pmdinfo["pci_ids"]:
vendor = pcidb.find_vendor(i[0])
device = vendor.find_device(i[1])
subdev = device.find_subid(i[2], i[3])
print("%s (%s) : %s (%s) %s" %
(vendor.name, vendor.ID, device.name,
device.ID, subdev.name))
def parse_pmd_info_string(self, mystring):
global raw_output
global pcidb
optional_pmd_info = [
{'id': 'params', 'tag': 'PMD PARAMETERS'},
{'id': 'kmod', 'tag': 'PMD KMOD DEPENDENCIES'}
]
i = mystring.index("=")
mystring = mystring[i + 2:]
pmdinfo = json.loads(mystring)
if raw_output:
print(json.dumps(pmdinfo))
return
print("PMD NAME: " + pmdinfo["name"])
for i in optional_pmd_info:
try:
print("%s: %s" % (i['tag'], pmdinfo[i['id']]))
except KeyError:
continue
if (len(pmdinfo["pci_ids"]) != 0):
print("PMD HW SUPPORT:")
if pcidb is not None:
self.pretty_print_pmdinfo(pmdinfo)
else:
print("VENDOR\t DEVICE\t SUBVENDOR\t SUBDEVICE")
for i in pmdinfo["pci_ids"]:
print("0x%04x\t 0x%04x\t 0x%04x\t\t 0x%04x" %
(i[0], i[1], i[2], i[3]))
print("")
def display_pmd_info_strings(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = self._section_from_spec(section_spec)
if section is None:
return
data = section.data()
dataptr = 0
while dataptr < len(data):
while (dataptr < len(data) and
not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
mystring = bytes2str(data[dataptr:endptr])
rc = mystring.find("PMD_INFO_STRING")
if (rc != -1):
self.parse_pmd_info_string(mystring)
dataptr = endptr
def find_librte_eal(self, section):
for tag in section.iter_tags():
if tag.entry.d_tag == 'DT_NEEDED':
if "librte_eal" in tag.needed:
return tag.needed
return None
def search_for_autoload_path(self):
scanelf = self
scanfile = None
library = None
section = self._section_from_spec(".dynamic")
try:
eallib = self.find_librte_eal(section)
if eallib is not None:
ldlibpath = os.environ.get('LD_LIBRARY_PATH')
if ldlibpath is None:
ldlibpath = ""
dtr = self.get_dt_runpath(section)
library = search_file(eallib,
dtr + ":" + ldlibpath +
":/usr/lib64:/lib64:/usr/lib:/lib")
if library is None:
return (None, None)
if raw_output is False:
print("Scanning for autoload path in %s" % library)
scanfile = open(library, 'rb')
scanelf = ReadElf(scanfile, sys.stdout)
except AttributeError:
# Not a dynamic binary
pass
except ELFError:
scanfile.close()
return (None, None)
section = scanelf._section_from_spec(".rodata")
if section is None:
if scanfile is not None:
scanfile.close()
return (None, None)
data = section.data()
dataptr = 0
while dataptr < len(data):
while (dataptr < len(data) and
not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
mystring = bytes2str(data[dataptr:endptr])
rc = mystring.find("DPDK_PLUGIN_PATH")
if (rc != -1):
rc = mystring.find("=")
return (mystring[rc + 1:], library)
dataptr = endptr
if scanfile is not None:
scanfile.close()
return (None, None)
def get_dt_runpath(self, dynsec):
for tag in dynsec.iter_tags():
if tag.entry.d_tag == 'DT_RUNPATH':
return tag.runpath
return ""
def process_dt_needed_entries(self):
""" Look to see if there are any DT_NEEDED entries in the binary
And process those if there are
"""
global raw_output
runpath = ""
ldlibpath = os.environ.get('LD_LIBRARY_PATH')
if ldlibpath is None:
ldlibpath = ""
dynsec = self._section_from_spec(".dynamic")
try:
runpath = self.get_dt_runpath(dynsec)
except AttributeError:
# dynsec is None, just return
return
for tag in dynsec.iter_tags():
if tag.entry.d_tag == 'DT_NEEDED':
rc = tag.needed.find(b"librte_pmd")
if (rc != -1):
library = search_file(tag.needed,
runpath + ":" + ldlibpath +
":/usr/lib64:/lib64:/usr/lib:/lib")
if library is not None:
if raw_output is False:
print("Scanning %s for pmd information" % library)
with open(library, 'rb') as file:
try:
libelf = ReadElf(file, sys.stdout)
except ELFError:
print("%s is no an ELF file" % library)
continue
libelf.process_dt_needed_entries()
libelf.display_pmd_info_strings(".rodata")
file.close()
def scan_autoload_path(autoload_path):
global raw_output
if os.path.exists(autoload_path) is False:
return
try:
dirs = os.listdir(autoload_path)
except OSError:
# Couldn't read the directory, give up
return
for d in dirs:
dpath = os.path.join(autoload_path, d)
if os.path.isdir(dpath):
scan_autoload_path(dpath)
if os.path.isfile(dpath):
try:
file = open(dpath, 'rb')
readelf = ReadElf(file, sys.stdout)
except ELFError:
# this is likely not an elf file, skip it
continue
except IOError:
# No permission to read the file, skip it
continue
if raw_output is False:
print("Hw Support for library %s" % d)
readelf.display_pmd_info_strings(".rodata")
file.close()
def scan_for_autoload_pmds(dpdk_path):
"""
search the specified application or path for a pmd autoload path
then scan said path for pmds and report hw support
"""
global raw_output
if (os.path.isfile(dpdk_path) is False):
if raw_output is False:
print("Must specify a file name")
return
file = open(dpdk_path, 'rb')
try:
readelf = ReadElf(file, sys.stdout)
    except ELFError:
if raw_output is False:
print("Unable to parse %s" % file)
return
(autoload_path, scannedfile) = readelf.search_for_autoload_path()
    if (autoload_path is None or autoload_path == ""):
if (raw_output is False):
print("No autoload path configured in %s" % dpdk_path)
return
if (raw_output is False):
if (scannedfile is None):
scannedfile = dpdk_path
print("Found autoload path %s in %s" % (autoload_path, scannedfile))
file.close()
if (raw_output is False):
print("Discovered Autoload HW Support:")
scan_autoload_path(autoload_path)
return
def main(stream=None):
global raw_output
global pcidb
pcifile_default = "./pci.ids" # For unknown OS's assume local file
if platform.system() == 'Linux':
pcifile_default = "/usr/share/hwdata/pci.ids"
elif platform.system() == 'FreeBSD':
pcifile_default = "/usr/local/share/pciids/pci.ids"
if not os.path.exists(pcifile_default):
pcifile_default = "/usr/share/misc/pci_vendors"
optparser = OptionParser(
        usage='usage: %prog [-hrtp] [-d <pci id file>] <elf-file>',
description="Dump pmd hardware support info",
add_help_option=True)
optparser.add_option('-r', '--raw',
action='store_true', dest='raw_output',
help='Dump raw json strings')
optparser.add_option("-d", "--pcidb", dest="pcifile",
help="specify a pci database "
"to get vendor names from",
default=pcifile_default, metavar="FILE")
optparser.add_option("-t", "--table", dest="tblout",
help="output information on hw support as a "
"hex table",
action='store_true')
optparser.add_option("-p", "--plugindir", dest="pdir",
help="scan dpdk for autoload plugins",
action='store_true')
options, args = optparser.parse_args()
if options.raw_output:
raw_output = True
if options.pcifile:
pcidb = PCIIds(options.pcifile)
if pcidb is None:
print("Pci DB file not found")
exit(1)
if options.tblout:
options.pcifile = None
pcidb = None
if (len(args) == 0):
optparser.print_usage()
exit(1)
if options.pdir is True:
exit(scan_for_autoload_pmds(args[0]))
ldlibpath = os.environ.get('LD_LIBRARY_PATH')
if (ldlibpath is None):
ldlibpath = ""
if (os.path.exists(args[0]) is True):
myelffile = args[0]
else:
myelffile = search_file(
args[0], ldlibpath + ":/usr/lib64:/lib64:/usr/lib:/lib")
if (myelffile is None):
print("File not found")
sys.exit(1)
with open(myelffile, 'rb') as file:
try:
readelf = ReadElf(file, sys.stdout)
readelf.process_dt_needed_entries()
readelf.display_pmd_info_strings(".rodata")
sys.exit(0)
except ELFError as ex:
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
# -------------------------------------------------------------------------
if __name__ == '__main__':
main()
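# Example invocations (a sketch; the script name and the target binary/library
# paths below are placeholders, not files this script ships with):
#   python dpdk-pmdinfo.py -d /usr/share/hwdata/pci.ids /usr/bin/testpmd
#   python dpdk-pmdinfo.py -r librte_pmd_e1000.so    # raw JSON output
#   python dpdk-pmdinfo.py -p /usr/bin/testpmd       # scan the plugin autoload path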
|
[] |
[] |
[
"LD_LIBRARY_PATH"
] |
[]
|
["LD_LIBRARY_PATH"]
|
python
| 1 | 0 | |
scr/help_functions.py
|
"""
Biblioteca que tem como funcionalidade armazenas funções. Retirando assim a criação de funções de outros locais
"""
import uuid
import sys
import os
from flask_login import current_user
import bcrypt
PY2 = sys.version_info[0] == 2
class RoleMixin(object):
"""
"solução" para adicionar role ao flask login
"""
if not PY2: # pragma: no cover
# Python 3 implicitly set __hash__ to None if we override __eq__
# We set it back to its default implementation
__hash__ = object.__hash__
@property
def has_role(self):
return self.role
# Generates a random key used to reset the password
def gerar_key():
return uuid.uuid4()
# Reads the e-mail user name from the environment
def email_user():
return os.getenv("email_user")
# Reads the e-mail password from the environment
def email_passw():
return os.getenv("email_password")
# Reads the site URL from the environment
def url_do_site():
return os.getenv("url_site")
def atualizar_perfil_func(nome: str, email: str, senha: str) -> bool:
"""
Atualiza o perfil do usuário fazendo uso do current user
@param nome: Nome do usuário recebido através do form de atualizar
@param email: Email do usuário recebido através do form de atualizar
@param senha: Senha do usuário recebido através do form de atualizar
@return: Retorna um bool, False caso não tenha atualização e True em caso de atualização
"""
houve_atualizacao_de_dados = False
if nome:
current_user.nome = nome
houve_atualizacao_de_dados = True
if email:
current_user.email = email
houve_atualizacao_de_dados = True
if senha:
if len(senha) > 3:
salt = bcrypt.gensalt()
senha_cripto = bcrypt.hashpw(senha.encode('utf-8'), salt)
current_user.senha = senha_cripto
houve_atualizacao_de_dados = True
return houve_atualizacao_de_dados
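# Note: the password is hashed with bcrypt before it is stored. A minimal
# verification sketch (``senha_digitada`` and ``senha_salva`` are hypothetical
# names for the submitted password and the stored hash):
#   bcrypt.checkpw(senha_digitada.encode('utf-8'), senha_salva)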
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
"""
Função para verificar se a string é menor que a quantidade de caracters informados
@param palavra: A palavra a ser verificada
@param quantidade: A quantidade de caracters que deseja verificar
@return: Retorna True em caso da palavra seja menor que a quantidade de caracters e False em caso negativo
"""
tamanho = len(palavra)
eh_menor = False
if tamanho < quantidade:
eh_menor = True
return eh_menor
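# Usage sketch (illustrative values):
#   eh_menor_que_essa_quantidade_de_caracters("abc", 4)    # True, since 3 < 4
#   eh_menor_que_essa_quantidade_de_caracters("abcd", 4)   # False, since 4 is not < 4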
def atualizar_pergunta_func(pergunta, titulo, pergunta_questao, resposta_1, resposta_2, resposta_3, resposta_4, resposta_certa, questao_dificuldade):
"""
Função para atualizar a pergunta
@param pergunta: A pergunta extraida do DB (objeto)
@param titulo: titulo extraido do form
@param pergunta_questao: a questão da pergunta extraida do form
@param resposta_1: a resposta 1 extraida do form
@param resposta_2: a resposta 2 extraida do form
@param resposta_3: a resposta 3 extraida do form
@param resposta_4: a resposta 4 extraida do form
@param resposta_certa: a resposta certa extraida do form
@param questao_dificuldade: a dificuldade da questão extraida do form
"""
pergunta.resposta_certa = resposta_certa
pergunta.questao_dificuldade = questao_dificuldade
if titulo:
pergunta.pergunta_titulo = titulo
if pergunta_questao:
pergunta.pergunta = pergunta_questao
if resposta_1:
pergunta.resp_1 = resposta_1
if resposta_2:
pergunta.resp_2 = resposta_2
if resposta_3:
pergunta.resp_3 = resposta_3
if resposta_4:
pergunta.resp_4 = resposta_4
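# Usage sketch (``pergunta`` would be an ORM object loaded from the database;
# all values below are illustrative assumptions):
#   atualizar_pergunta_func(pergunta, "New title", "New question text",
#                           "A", "B", "C", "D", resposta_certa=1,
#                           questao_dificuldade="facil")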
|
[] |
[] |
[
"email_user",
"email_password",
"url_site"
] |
[]
|
["email_user", "email_password", "url_site"]
|
python
| 3 | 0 |