filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
pkg/util/downloads.go | package util
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/jenkins-x/jx-logging/pkg/log"
"github.com/pkg/errors"
"github.com/blang/semver"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
var githubClient *github.Client
// DownloadFile downloads a file from the given URL to the given path and makes it executable
func DownloadFile(filepath string, url string) (err error) {
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close() //nolint:errcheck
// Get the data
resp, err := GetClientWithTimeout(time.Hour * 2).Get(url)
if err != nil {
return err
}
defer resp.Body.Close() //nolint:errcheck
if resp.StatusCode != http.StatusOK {
err := fmt.Errorf("download of %s failed with return code %d", url, resp.StatusCode)
return err
}
// Write the body to the file
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
// make it executable
err = os.Chmod(filepath, 0755)
if err != nil {
return err
}
return nil
}
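// GetLatestVersionFromGitHub returns the latest release of the given GitHub repository as a parsed semver version.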
func GetLatestVersionFromGitHub(githubOwner, githubRepo string) (semver.Version, error) {
text, err := GetLatestVersionStringFromGitHub(githubOwner, githubRepo)
if err != nil {
return semver.Version{}, err
}
if text == "" {
return semver.Version{}, fmt.Errorf("No version found")
}
return semver.Make(text)
}
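// GetLatestVersionStringFromGitHub returns the latest release tag of the given GitHub repository with any leading "v" prefix stripped.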
func GetLatestVersionStringFromGitHub(githubOwner, githubRepo string) (string, error) {
latestVersionString, err := GetLatestReleaseFromGitHub(githubOwner, githubRepo)
if err != nil {
return "", err
}
if latestVersionString != "" {
return strings.TrimPrefix(latestVersionString, "v"), nil
}
return "", fmt.Errorf("Unable to find the latest version for github.com/%s/%s", githubOwner, githubRepo)
}
// GetLatestVersionStringFromBucketURLs returns the latest version from a list of bucket URLs with the version at the end of the path
func GetLatestVersionStringFromBucketURLs(versionStrings []string) (semver.Version, error) {
versions := make([]semver.Version, 0)
for _, versionStr := range versionStrings {
versionPaths := strings.Split(versionStr, "/")
version, err := semver.New(versionPaths[len(versionPaths)-2])
if err != nil {
return semver.Version{}, err
}
versions = append(versions, *version)
}
semver.Sort(versions)
return versions[len(versions)-1], nil
}
// GetLatestReleaseFromGitHub gets the latest Release from a specific github repo
func GetLatestReleaseFromGitHub(githubOwner, githubRepo string) (string, error) {
// Github has low (60/hour) unauthenticated limits from a single IP address. Try to get the latest release via HTTP
// first to avoid hitting this limit (eg, small company behind one IP address)
version := ""
var err error
version, err = getLatestReleaseFromGithubUsingHttpRedirect(githubOwner, githubRepo)
if version == "" || err != nil {
log.Logger().Warnf("getting latest release using HTTP redirect (%v) - using API instead", err)
version, err = getLatestReleaseFromGithubUsingApi(githubOwner, githubRepo)
}
return version, err
}
// GetLatestReleaseFromGitHubURL returns the latest release version for the git URL
func GetLatestReleaseFromGitHubURL(gitURL string) (string, error) {
const gitHubPrefix = "https://github.com/"
if !strings.HasPrefix(gitURL, gitHubPrefix) {
log.Logger().Warnf("cannot determine the latest release of version stream git URL %s\n", gitURL)
return "", nil
}
name := strings.TrimPrefix(gitURL, gitHubPrefix)
paths := strings.Split(name, "/")
if len(paths) <= 1 {
log.Logger().Warnf("cannot parse git URL %s so cannot determine the latest release\n", gitURL)
return "", nil
}
owner := paths[0]
repo := strings.TrimSuffix(paths[1], ".git")
return GetLatestReleaseFromGitHub(owner, repo)
}
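// getLatestReleaseFromGithubUsingApi queries the GitHub releases API for the latest release tag of the given repository.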
func getLatestReleaseFromGithubUsingApi(githubOwner, githubRepo string) (string, error) {
client, _, _, err := preamble()
if err != nil {
return "", err
}
release, resp, err := client.Repositories.GetLatestRelease(context.Background(), githubOwner, githubRepo)
if err != nil {
return "", errors.Wrapf(err, "getting latest version for github.com/%s/%s", githubOwner, githubRepo)
}
defer resp.Body.Close()
latestVersionString := release.TagName
if latestVersionString != nil {
return *latestVersionString, nil
}
return "", fmt.Errorf("unable to find the latest version for github.com/%s/%s", githubOwner, githubRepo)
}
func getLatestReleaseFromGithubUsingHttpRedirect(githubOwner, githubRepo string) (string, error) {
return getLatestReleaseFromHostUsingHttpRedirect("https://github.com", githubOwner, githubRepo)
}
func getLatestReleaseFromHostUsingHttpRedirect(host, githubOwner, githubRepo string) (string, error) {
// Github will redirect "https://github.com/organisation/repo/releases/latest" to the latest release, eg
// https://github.com/jenkins-x/jx/releases/tag/v1.3.696
// We can use this to get the latest release without affecting any API limits.
url := fmt.Sprintf("%s/%s/%s/releases/latest", host, githubOwner, githubRepo)
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error { // Don't follow redirects
// We want to follow 301 permanent redirects (eg, repo renames like kubernetes/helm --> helm/helm)
// but not 302 temporary redirects (as these point to the latest tag)
if req.Response.StatusCode == 302 {
return http.ErrUseLastResponse
} else {
return nil
}
},
}
response, err := client.Get(url)
if err != nil {
return "", errors.Wrapf(err, "getting %s", url)
}
defer response.Body.Close()
if response.StatusCode >= 300 && response.StatusCode <= 399 {
location := response.Header.Get("Location")
if location == "" {
return "", fmt.Errorf("no location header in repsponse")
}
arr := strings.Split(location, "releases/tag/")
if len(arr) == 2 {
return arr[1], nil
} else {
return "", fmt.Errorf("unexpected location header: %s", location)
}
} else {
return "", fmt.Errorf("could not determine redirect for %s. Got a %v response", url, response.StatusCode)
}
}
// GetLatestFullTagFromGithub gets the latest 'full' tag from a specific github repo. This (at present) ignores releases
// with a hyphen in it, usually used with -SNAPSHOT, or -RC1 or -beta
func GetLatestFullTagFromGithub(githubOwner, githubRepo string) (*github.RepositoryTag, error) {
tags, err := GetTagsFromGithub(githubOwner, githubRepo)
if err == nil {
// Iterate over the tags to find the first that doesn't contain any hyphens in it (so is just x.y.z)
for _, tag := range tags {
name := *tag.Name
if !strings.ContainsRune(name, '-') {
return tag, nil
}
}
return nil, errors.Errorf("No Full releases found for %s/%s", githubOwner, githubRepo)
}
return nil, err
}
// GetLatestTagFromGithub gets the latest (in github order) tag from a specific github repo
func GetLatestTagFromGithub(githubOwner, githubRepo string) (string, error) {
tags, err := GetTagsFromGithub(githubOwner, githubRepo)
if err == nil {
return *tags[0].Name, nil
}
return "", err
}
// GetTagsFromGithub gets the list of tags on a specific github repo
func GetTagsFromGithub(githubOwner, githubRepo string) ([]*github.RepositoryTag, error) {
client, _, _, err := preamble()
if err != nil {
return []*github.RepositoryTag{}, err
}
tags, resp, err := client.Repositories.ListTags(context.Background(), githubOwner, githubRepo, nil)
if err != nil {
return []*github.RepositoryTag{}, fmt.Errorf("unable to get tags for github.com/%s/%s %v", githubOwner, githubRepo, err)
}
defer resp.Body.Close()
return tags, nil
}
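// preamble lazily initialises the shared GitHub client, authenticating with the GH_TOKEN environment variable
// when it is set, and returns nil placeholders for the release, response and error values that callers reassign.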
func preamble() (*github.Client, *github.RepositoryRelease, *github.Response, error) {
if githubClient == nil {
token := os.Getenv("GH_TOKEN")
var tc *http.Client
if len(token) > 0 {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
tc = oauth2.NewClient(context.TODO(), ts)
}
githubClient = github.NewClient(tc)
}
client := githubClient
var (
release *github.RepositoryRelease
resp *github.Response
err error
)
return client, release, resp, err
}
// UnTargz extracts a tarball to a target directory, adapted from
// http://blog.ralch.com/tutorial/golang-working-with-tar-and-gzipf
func UnTargz(tarball, target string, onlyFiles []string) error {
zreader, err := os.Open(tarball)
if err != nil {
return err
}
defer zreader.Close() //nolint:errcheck
reader, err := gzip.NewReader(zreader)
if err != nil {
return err
}
defer reader.Close() //nolint:errcheck
tarReader := tar.NewReader(reader)
for {
inkey := false
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
for _, value := range onlyFiles {
if value == "*" || value == path.Base(header.Name) {
inkey = true
break
}
}
if !inkey && len(onlyFiles) > 0 {
continue
}
path := filepath.Join(target, path.Base(header.Name))
err = UnTarFile(header, path, tarReader)
if err != nil {
return err
}
}
return nil
}
// UnTargzAll extracts a tarball to a target directory including any folders inside the tarball, adapted from
// http://blog.ralch.com/tutorial/golang-working-with-tar-and-gzipf
func UnTargzAll(tarball, target string) error {
zreader, err := os.Open(tarball)
if err != nil {
return err
}
defer zreader.Close()
reader, err := gzip.NewReader(zreader)
if err != nil {
return err
}
defer reader.Close()
tarReader := tar.NewReader(reader)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
path := filepath.Join(target, header.Name)
err = UnTarFile(header, path, tarReader)
if err != nil {
return err
}
}
return nil
}
// UnTarFile extracts one file from the tar, or creates a directory
func UnTarFile(header *tar.Header, target string, tarReader io.Reader) error {
info := header.FileInfo()
if info.IsDir() {
if err := os.MkdirAll(target, info.Mode()); err != nil {
return err
}
return nil
}
// In a normal archive, directories are mentioned before their files,
// but in an archive generated by helm, no directories are mentioned.
if err := os.MkdirAll(path.Dir(target), 0755); err != nil {
return err
}
file, err := os.OpenFile(target, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, tarReader)
return err
}
| [
"\"GH_TOKEN\""
]
| []
| [
"GH_TOKEN"
]
| [] | ["GH_TOKEN"] | go | 1 | 0 | |
tests/test_arbiter.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import gunicorn.app.base
import gunicorn.arbiter
class PreloadedAppWithEnvSettings(gunicorn.app.base.BaseApplication):
"""
Simple application that makes use of the 'preload' feature to
start the application before spawning worker processes and sets
environmental variable configuration settings.
"""
def init(self, parser, opts, args):
"""No-op"""
def load(self):
"""No-op"""
def load_config(self):
"""Set the 'preload_app' and 'raw_env' settings in order to verify their
interaction below.
"""
self.cfg.set('raw_env', [
'SOME_PATH=/tmp/something', 'OTHER_PATH=/tmp/something/else'])
self.cfg.set('preload_app', True)
def wsgi(self):
"""Assert that the expected environmental variables are set when
the main entry point of this application is called as part of a
'preloaded' application.
"""
verify_env_vars()
return super(PreloadedAppWithEnvSettings, self).wsgi()
def verify_env_vars():
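"""Assert that the configured raw_env settings are present in os.environ."""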
assert os.getenv('SOME_PATH') == '/tmp/something'
assert os.getenv('OTHER_PATH') == '/tmp/something/else'
def test_env_vars_available_during_preload():
"""Ensure that configured environmental variables are set during the
initial set up of the application (called from the .setup() method of
the Arbiter) such that they are available during the initial loading
of the WSGI application.
"""
# Note that we aren't making any assertions here, they are made in the
# dummy application object being loaded here instead.
gunicorn.arbiter.Arbiter(PreloadedAppWithEnvSettings())
| []
| []
| [
"SOME_PATH",
"OTHER_PATH"
]
| [] | ["SOME_PATH", "OTHER_PATH"] | python | 2 | 0 | |
vendor/k8s.io/kubernetes/test/e2e/federated-ingress.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"net/http"
"os"
"reflect"
"strconv"
"time"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MaxRetriesOnFederatedApiserver = 3
FederatedIngressTimeout = 60 * time.Second
FederatedIngressName = "federated-ingress"
FederatedIngressServiceName = "federated-ingress-service"
FederatedIngressServicePodName = "federated-ingress-service-test-pod"
)
var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federated-ingress")
// Create/delete ingress api objects
// Validate federation apiserver, does not rely on underlying clusters or federation ingress controller.
Describe("Ingress objects", func() {
AfterEach(func() {
nsName := f.FederationNamespace.Name
// Delete registered ingresses.
ingressList, err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, ingress := range ingressList.Items {
err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).Delete(ingress.Name, &api.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.Client)
nsName := f.FederationNamespace.Name
ingress := createIngressOrFail(f.FederationClientset_1_4, nsName)
By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded. Deleting ingress.", ingress.Name, nsName))
// Cleanup
err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).Delete(ingress.Name, &api.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ingress.Namespace)
By(fmt.Sprintf("Deletion of ingress %q in namespace %q succeeded.", ingress.Name, nsName))
})
})
// e2e cases for federation ingress controller
var _ = Describe("Federated Ingresses", func() {
var (
clusters map[string]*cluster // All clusters, keyed by cluster name
primaryClusterName, federationName, ns string
jig *federationTestJig
)
// register clusters in federation apiserver
BeforeEach(func() {
framework.SkipUnlessFederated(f.Client)
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
jig = newFederationTestJig(f.FederationClientset_1_4)
clusters = map[string]*cluster{}
primaryClusterName = registerClusters(clusters, UserAgentName, federationName, f)
ns = f.FederationNamespace.Name
})
AfterEach(func() {
unregisterClusters(clusters, f)
})
It("should create and update matching ingresses in underlying clusters", func() {
ingress := createIngressOrFail(f.FederationClientset_1_4, ns)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting ingress %q in namespace %q", ingress.Name, ns))
err := f.FederationClientset_1_4.Ingresses(ns).Delete(ingress.Name, &api.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ns)
}()
// wait for ingress shards being created
waitForIngressShardsOrFail(ns, ingress, clusters)
ingress = updateIngressOrFail(f.FederationClientset_1_4, ns)
waitForIngressShardsUpdatedOrFail(ns, ingress, clusters)
})
var _ = Describe("Ingress connectivity and DNS", func() {
var (
service *v1.Service
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.Client)
// create backend pod
createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
// create backend service
service = createServiceOrFail(f.FederationClientset_1_4, ns, FederatedIngressServiceName)
// create ingress object
jig.ing = createIngressOrFail(f.FederationClientset_1_4, ns)
// wait for services objects sync
waitForServiceShardsOrFail(ns, service, clusters)
// wait for ingress objects sync
waitForIngressShardsOrFail(ns, jig.ing, clusters)
})
AfterEach(func() {
deleteBackendPodsOrFail(clusters, ns)
if service != nil {
deleteServiceOrFail(f.FederationClientset_1_4, ns, service.Name)
service = nil
} else {
By("No service to delete. Service is nil")
}
if jig.ing != nil {
deleteIngressOrFail(f.FederationClientset_1_4, ns, jig.ing.Name)
jig.ing = nil
} else {
By("No ingress to delete. Ingress is nil")
}
})
PIt("should be able to discover a federated ingress service", func() {
// we only check the backend service DNS names here, not the ingress name
svcDNSNames := []string{
fmt.Sprintf("%s.%s", FederatedIngressServiceName, ns),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns),
// TODO these two entries are not set yet
//fmt.Sprintf("%s.%s.%s", FederatedIngressServiceName, ns, federationName),
//fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns, federationName),
}
// check dns records in underlying cluster
for i, DNSName := range svcDNSNames {
discoverService(f, DNSName, true, "federated-ingress-e2e-discovery-pod-"+strconv.Itoa(i))
}
// TODO check dns record in global dns server
// check the traffic on federation ingress
jig.waitForFederatedIngress()
})
})
})
})
/*
equivalent returns true if the two ingress spec are equivalent.
*/
func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
return reflect.DeepEqual(clusterIngress.Spec, federatedIngress.Spec)
}
/*
waitForIngressOrFail waits until a ingress is either present or absent in the cluster specified by clientset.
If the condition is not met within timeout, it fails the calling test.
*/
func waitForIngressOrFail(clientset *release_1_3.Clientset, namespace string, ingress *v1beta1.Ingress, present bool, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
var clusterIngress *v1beta1.Ingress
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
var err error
clusterIngress, err = clientset.Ingresses(namespace).Get(ingress.Name)
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is absent", ingress.Name, namespace))
return true, nil // Success
}
if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is present", ingress.Name, namespace))
return true, nil // Success
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", ingress.Name, namespace, clusterIngress != nil && err == nil, present, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster: Present=%v", ingress.Name, namespace, present)
if present && clusterIngress != nil {
Expect(equivalentIngress(*clusterIngress, *ingress))
}
}
/*
waitForIngressShardsOrFail waits for the ingress to appear in all clusters
*/
func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
}
}
/*
waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
*/
func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
}
}
/*
waitForIngressUpdateOrFail waits until a ingress is updated in the specified cluster with same spec of federated ingress.
If the condition is not met within timeout, it fails the calling test.
*/
func waitForIngressUpdateOrFail(clientset *release_1_3.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name)
if err == nil { // We want it present, and the Get succeeded, so we're all good.
if equivalentIngress(*clusterIngress, *ingress) {
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is updated", ingress.Name, namespace))
return true, nil
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster, waiting for service being updated, trying again in %s (err=%v)", ingress.Name, namespace, framework.Poll, err))
return false, nil
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster, waiting for service being updated, trying again in %s (err=%v)", ingress.Name, namespace, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster", ingress.Name, namespace)
}
/*
waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
*/
func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)
}
}
func deleteIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string, ingressName string) {
if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteIngressOrFail: clientset: %v, namespace: %v, ingress: %v", clientset, namespace, ingressName))
}
err := clientset.Ingresses(namespace).Delete(ingressName, api.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
}
func createIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1beta1.Ingress {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Creating federated ingress %q in namespace %q", FederatedIngressName, namespace))
ingress := &v1beta1.Ingress{
ObjectMeta: v1.ObjectMeta{
Name: FederatedIngressName,
},
Spec: v1beta1.IngressSpec{
Backend: &v1beta1.IngressBackend{
ServiceName: "testingress-service",
ServicePort: intstr.FromInt(80),
},
},
}
_, err := clientset.Extensions().Ingresses(namespace).Create(ingress)
framework.ExpectNoError(err, "Creating ingress %q in namespace %q", ingress.Name, namespace)
By(fmt.Sprintf("Successfully created federated ingress %q in namespace %q", FederatedIngressName, namespace))
return ingress
}
func updateIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string) (newIng *v1beta1.Ingress) {
var err error
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
ingress := &v1beta1.Ingress{
ObjectMeta: v1.ObjectMeta{
Name: FederatedIngressName,
},
Spec: v1beta1.IngressSpec{
Backend: &v1beta1.IngressBackend{
ServiceName: "updated-testingress-service",
ServicePort: intstr.FromInt(80),
},
},
}
for i := 0; i < MaxRetriesOnFederatedApiserver; i++ {
_, err = clientset.Extensions().Ingresses(namespace).Get(FederatedIngressName)
if err != nil {
framework.Failf("failed to get ingress %q: %v", FederatedIngressName, err)
}
newIng, err = clientset.Extensions().Ingresses(namespace).Update(ingress)
if err == nil {
describeIng(namespace)
return
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
framework.Failf("failed to update ingress %q: %v", FederatedIngressName, err)
}
}
framework.Failf("too many retries updating ingress %q", FederatedIngressName)
return newIng
}
func (j *federationTestJig) waitForFederatedIngress() {
// Wait for the loadbalancer IP.
address, err := WaitForFederatedIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout)
if err != nil {
framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
}
j.address = address
framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
timeoutClient := &http.Client{Timeout: reqTimeout}
// Check that all rules respond to a simple GET.
for _, rules := range j.ing.Spec.Rules {
proto := "http"
for _, p := range rules.IngressRuleValue.HTTP.Paths {
route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, timeoutClient, false))
}
}
}
type federationTestJig struct {
// TODO add TLS check later
rootCAs map[string][]byte
address string
ing *v1beta1.Ingress
client *federation_release_1_4.Clientset
}
func newFederationTestJig(c *federation_release_1_4.Clientset) *federationTestJig {
return &federationTestJig{client: c, rootCAs: map[string][]byte{}}
}
// WaitForFederatedIngressAddress waits for the Ingress to acquire an address.
func WaitForFederatedIngressAddress(c *federation_release_1_4.Clientset, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getFederatedIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
framework.Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// getFederatedIngressAddress returns the ips/hostnames associated with the Ingress.
func getFederatedIngressAddress(client *federation_release_1_4.Clientset, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name)
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
| [
"\"FEDERATION_NAME\""
]
| []
| [
"FEDERATION_NAME"
]
| [] | ["FEDERATION_NAME"] | go | 1 | 0 | |
repository_test.go | package badman_test
import (
"encoding/binary"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/google/uuid"
"github.com/m-mizutani/badman"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInMemoryRepository(t *testing.T) {
repo := badman.NewInMemoryRepository()
repositoryCommonTest(repo, t)
}
func TestDynamoRepository(t *testing.T) {
region, tableName := os.Getenv("TABLE_REGION"), os.Getenv("TABLE_NAME")
if region == "" || tableName == "" {
t.Skip("TABLE_REGION or TABLE_NAME is not available")
}
repo := badman.NewDynamoRepository(region, tableName)
repositoryCommonTest(repo, t)
}
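// repositoryCommonTest exercises the Put, Get, Del and Dump behaviour shared by every Repository implementation.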
func repositoryCommonTest(repo badman.Repository, t *testing.T) {
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
ip := make(net.IP, 4)
binary.BigEndian.PutUint32(ip, rnd.Uint32())
addr1 := ip.String()
domain1 := uuid.New().String() + ".blue.example.com"
domain2 := uuid.New().String() + ".orange.example.com"
e1 := badman.BadEntity{
Name: addr1,
SavedAt: time.Now(),
Src: "tester1",
}
e2 := badman.BadEntity{
Name: domain1,
SavedAt: time.Now(),
Src: "tester2",
}
e3 := badman.BadEntity{
Name: domain1,
SavedAt: time.Now(),
Src: "tester3",
}
e4 := badman.BadEntity{
Name: domain2,
SavedAt: time.Now(),
Src: "tester3",
}
// No entity in repository
r1, err := repo.Get(addr1)
require.NoError(t, err)
assert.Nil(t, r1)
r2, err := repo.Get(domain1)
require.NoError(t, err)
assert.Nil(t, r2)
// Insert entities
require.NoError(t, repo.Put([]*badman.BadEntity{&e1, &e2, &e3, &e4}))
// Get operations
r3, err := repo.Get(addr1)
require.NoError(t, err)
assert.NotNil(t, r3)
require.Equal(t, 1, len(r3))
assert.Equal(t, addr1, r3[0].Name)
r4, err := repo.Get(domain1)
require.NoError(t, err)
assert.NotNil(t, r4)
require.Equal(t, 2, len(r4))
assert.Equal(t, domain1, r4[0].Name)
assert.Equal(t, domain1, r4[1].Name)
if r4[0].Src == "tester2" {
assert.Equal(t, "tester3", r4[1].Src)
} else {
assert.Equal(t, "tester2", r4[1].Src)
}
// Delete operation
r5, err := repo.Get(domain2)
require.NoError(t, err)
assert.NotNil(t, r5)
require.Equal(t, 1, len(r5))
assert.Equal(t, domain2, r5[0].Name)
err = repo.Del(domain2)
require.NoError(t, err)
r6, err := repo.Get(domain2)
require.NoError(t, err)
assert.Equal(t, 0, len(r6))
// Dump operation
ch := repo.Dump()
if ch != nil {
counter := map[string]int{}
for q := range repo.Dump() {
require.NoError(t, q.Error)
for _, e := range q.Entities {
counter[e.Name]++
}
}
assert.Equal(t, 1, counter[addr1])
assert.Equal(t, 2, counter[domain1])
assert.Equal(t, 0, counter[domain2])
}
}
| [
"\"TABLE_REGION\"",
"\"TABLE_NAME\""
]
| []
| [
"TABLE_NAME",
"TABLE_REGION"
]
| [] | ["TABLE_NAME", "TABLE_REGION"] | go | 2 | 0 | |
internal/config/network.go | package config
import (
"encoding/json"
"net"
"os"
"strconv"
"strings"
)
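// initNetworkIDs caches the comma-separated SYNTROPY_NETWORK_IDS environment variable.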
func initNetworkIDs() {
cache.networkIDs = strings.Split(os.Getenv("SYNTROPY_NETWORK_IDS"), ",")
}
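// initPortsRange parses the SYNTROPY_PORT_RANGE environment variable ("start-end") into a validated port range,
// leaving 0-0 (disabled) when the value is missing or invalid.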
func initPortsRange() {
const maxPort = 65535
cache.portsRange.start = 0
cache.portsRange.end = 0
strport := strings.Split(os.Getenv("SYNTROPY_PORT_RANGE"), "-")
if len(strport) != 2 {
return
}
p1, e1 := strconv.Atoi(strport[0])
p2, e2 := strconv.Atoi(strport[1])
if e1 != nil || e2 != nil ||
p1 <= 0 || p2 <= 0 ||
p1 > maxPort || p2 > maxPort {
return
}
// expect users to set range correctly, but still validate
if p2 > p1 {
cache.portsRange.start = uint16(p1)
cache.portsRange.end = uint16(p2)
} else {
cache.portsRange.start = uint16(p2)
cache.portsRange.end = uint16(p1)
}
}
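// initAllowedIPs parses the SYNTROPY_ALLOWED_IPS environment variable, a JSON array of {"CIDR": "name"} objects,
// skipping entries whose subnet fails CIDR validation.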
func initAllowedIPs() {
cache.allowedIPs = []AllowedIPEntry{}
str := os.Getenv("SYNTROPY_ALLOWED_IPS")
var objMap []map[string]string
err := json.Unmarshal([]byte(str), &objMap)
if err != nil {
return
}
for _, pair := range objMap {
for k, v := range pair {
// A very simple CIDR validation
_, _, err := net.ParseCIDR(k)
if err != nil {
continue
}
cache.allowedIPs = append(cache.allowedIPs, AllowedIPEntry{
Name: v,
Subnet: k,
})
}
}
}
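// initMTU reads the SYNTROPY_MTU environment variable, keeping the default of 0 (auto) when the value is missing or invalid.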
func initMTU() {
cache.mtu = 0 // default value - auto
mtu, err := strconv.Atoi(os.Getenv("SYNTROPY_MTU"))
if err != nil {
return
}
if mtu < 0 {
return
}
cache.mtu = uint32(mtu)
}
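// initIptables enables iptables rule creation unless SYNTROPY_CREATE_IPTABLES_RULES is set to "disabled".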
func initIptables() {
cache.createIptablesRules = true
if strings.ToLower(os.Getenv("SYNTROPY_CREATE_IPTABLES_RULES")) == "disabled" {
cache.createIptablesRules = false
}
}
| [
"\"SYNTROPY_NETWORK_IDS\"",
"\"SYNTROPY_PORT_RANGE\"",
"\"SYNTROPY_ALLOWED_IPS\"",
"\"SYNTROPY_MTU\"",
"\"SYNTROPY_CREATE_IPTABLES_RULES\""
]
| []
| [
"SYNTROPY_ALLOWED_IPS",
"SYNTROPY_MTU",
"SYNTROPY_PORT_RANGE",
"SYNTROPY_CREATE_IPTABLES_RULES",
"SYNTROPY_NETWORK_IDS"
]
| [] | ["SYNTROPY_ALLOWED_IPS", "SYNTROPY_MTU", "SYNTROPY_PORT_RANGE", "SYNTROPY_CREATE_IPTABLES_RULES", "SYNTROPY_NETWORK_IDS"] | go | 5 | 0 | |
vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go | // +build !windows
package homedir
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"errors"
"os"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/unshare"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
return "HOME"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
//
// If linking statically with cgo enabled against glibc, ensure the
// osusergo build tag is used.
//
// If needing to do nss lookups, do not disable cgo or set osusergo.
func Get() string {
homedir, _ := unshare.HomeDir()
return homedir
}
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
return "~"
}
// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
return xdgRuntimeDir, nil
}
return "", errors.New("could not get XDG_RUNTIME_DIR")
}
// StickRuntimeDirContents sets the sticky bit on files that are under
// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
//
// StickRuntimeDirContents returns the slice of files that were made sticky.
// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func StickRuntimeDirContents(files []string) ([]string, error) {
runtimeDir, err := GetRuntimeDir()
if err != nil {
// ignore error if runtimeDir is empty
return nil, nil
}
runtimeDir, err = filepath.Abs(runtimeDir)
if err != nil {
return nil, err
}
var sticked []string
for _, f := range files {
f, err = filepath.Abs(f)
if err != nil {
return sticked, err
}
if strings.HasPrefix(f, runtimeDir+"/") {
if err = stick(f); err != nil {
return sticked, err
}
sticked = append(sticked, f)
}
}
return sticked, nil
}
func stick(f string) error {
st, err := os.Stat(f)
if err != nil {
return err
}
m := st.Mode()
m |= os.ModeSticky
return os.Chmod(f, m)
}
| [
"\"XDG_RUNTIME_DIR\""
]
| []
| [
"XDG_RUNTIME_DIR"
]
| [] | ["XDG_RUNTIME_DIR"] | go | 1 | 0 | |
seahub/settings.py | # Copyright (c) 2012-2016 Seafile Ltd.
# -*- coding: utf-8 -*-
# Django settings for seahub project.
import sys
import os
import re
from seaserv import FILE_SERVER_PORT
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), os.pardir)
DEBUG = False
SERVICE_URL = 'http://127.0.0.1:8000'
FILE_SERVER_ROOT = 'http://127.0.0.1:' + FILE_SERVER_PORT
CLOUD_MODE = False
MULTI_TENANCY = False
ADMINS = [
# ('Your Name', '[email protected]'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '%s/seahub/seahub.db' % PROJECT_ROOT, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# New in Django 3.2
# Default primary key field type to use for models that don’t have a field with primary_key=True.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = '%s/media/' % PROJECT_ROOT
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '%s/assets/' % MEDIA_ROOT
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'%s/static' % PROJECT_ROOT,
'%s/frontend/build' % PROJECT_ROOT,
)
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'frontend/',
'STATS_FILE': os.path.join(PROJECT_ROOT, 'frontend/webpack-stats.pro.json'),
}
}
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# StaticI18N config
STATICI18N_ROOT = '%s/static/scripts' % PROJECT_ROOT
STATICI18N_OUTPUT_DIR = 'i18n'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n*v0=jz-1rz@(4gx^tf%6^e7c&um@2)g-l=3_)t@19a69n1nv6'
ENABLE_REMOTE_USER_AUTHENTICATION = False
# Order is important
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'seahub.auth.middleware.AuthenticationMiddleware',
'seahub.base.middleware.BaseMiddleware',
'seahub.base.middleware.InfobarMiddleware',
'seahub.password_session.middleware.CheckPasswordHash',
'seahub.base.middleware.ForcePasswdChangeMiddleware',
'termsandconditions.middleware.TermsAndConditionsRedirectMiddleware',
'seahub.two_factor.middleware.OTPMiddleware',
'seahub.two_factor.middleware.ForceTwoFactorAuthMiddleware',
'seahub.trusted_ip.middleware.LimitIpMiddleware',
'seahub.organizations.middleware.RedirectMiddleware',
]
SITE_ROOT_URLCONF = 'seahub.urls'
ROOT_URLCONF = 'seahub.utils.rooturl'
SITE_ROOT = '/'
CSRF_COOKIE_NAME = 'sfcsrftoken'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'seahub.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, '../../seahub-data/custom/templates'),
os.path.join(PROJECT_ROOT, 'seahub/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'seahub.auth.context_processors.auth',
'seahub.base.context_processors.base',
'seahub.base.context_processors.debug',
],
},
},
]
LANGUAGES = [
# ('bg', gettext_noop(u'български език')),
('ca', 'Català'),
('cs', 'Čeština'),
('de', 'Deutsch'),
('en', 'English'),
('es', 'Español'),
('es-ar', 'Español de Argentina'),
('es-mx', 'Español de México'),
('fr', 'Français'),
('it', 'Italiano'),
('is', 'Íslenska'),
('lv', 'Latvian'),
# ('mk', 'македонски јазик'),
('hu', 'Magyar'),
('nl', 'Nederlands'),
('pl', 'Polski'),
('pt-br', 'Portuguese, Brazil'),
('ru', 'Русский'),
# ('sk', 'Slovak'),
('sl', 'Slovenian'),
('fi', 'Suomi'),
('sv', 'Svenska'),
('vi', 'Tiếng Việt'),
('tr', 'Türkçe'),
('uk', 'українська мова'),
('he', 'עברית'),
('ar', 'العربية'),
('el', 'ελληνικά'),
('th', 'ไทย'),
('ko', '한국어'),
('ja', '日本語'),
# ('lt', 'Lietuvių kalba'),
('zh-cn', '简体中文'),
('zh-tw', '繁體中文'),
]
LOCALE_PATHS = [
os.path.join(PROJECT_ROOT, 'locale'),
os.path.join(PROJECT_ROOT, 'seahub/trusted_ip/locale'),
]
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# In order to override the `createsuperuser` command, the base app *must* come before the auth app.
# ref: https://docs.djangoproject.com/en/1.11/howto/custom-management-commands/#overriding-commands
'seahub.base',
'django.contrib.auth',
'registration',
'captcha',
'statici18n',
'constance',
'constance.backends.database',
'termsandconditions',
'webpack_loader',
'seahub.api2',
'seahub.avatar',
'seahub.contacts',
'seahub.drafts',
'seahub.institutions',
'seahub.invitations',
'seahub.wiki',
'seahub.group',
'seahub.notifications',
'seahub.options',
'seahub.onlyoffice',
'seahub.profile',
'seahub.share',
'seahub.help',
'seahub.thumbnail',
'seahub.password_session',
'seahub.admin_log',
'seahub.wopi',
'seahub.tags',
'seahub.revision_tag',
'seahub.two_factor',
'seahub.role_permissions',
'seahub.trusted_ip',
'seahub.repo_tags',
'seahub.file_tags',
'seahub.related_files',
'seahub.work_weixin',
'seahub.dingtalk',
'seahub.file_participants',
'seahub.repo_api_tokens',
'seahub.abuse_reports',
'seahub.repo_auto_delete',
'seahub.ocm',
'seahub.ocm_via_webdav',
'seahub.search',
'seahub.sysadmin_extra',
'seahub.organizations',
'seahub.krb5_auth',
'seahub.django_cas_ng',
]
# Enable or disable view File Scan
ENABLE_FILE_SCAN = False
# Enable or disable multiple storage backends.
ENABLE_STORAGE_CLASSES = False
# `USER_SELECT` or `ROLE_BASED` or `REPO_ID_MAPPING`
STORAGE_CLASS_MAPPING_POLICY = 'USER_SELECT'
# Enable or disable constance(web settings).
ENABLE_SETTINGS_VIA_WEB = True
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'
AUTHENTICATION_BACKENDS = (
'seahub.base.accounts.AuthBackend',
)
ENABLE_CAS = False
ENABLE_ADFS_LOGIN = False
ENABLE_OAUTH = False
ENABLE_WATERMARK = False
ENABLE_SHOW_CONTACT_EMAIL_WHEN_SEARCH_USER = False
# enable work weixin
ENABLE_WORK_WEIXIN = False
# enable weixin
ENABLE_WEIXIN = False
# enable dingtalk
ENABLE_DINGTALK = False
# allow user to clean library trash
ENABLE_USER_CLEAN_TRASH = True
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/accounts/login/'
LOGIN_ERROR_DETAILS = False
LOGOUT_REDIRECT_URL = None
ACCOUNT_ACTIVATION_DAYS = 7
# allow seafile admin to view a user's repo
ENABLE_SYS_ADMIN_VIEW_REPO = False
# allow seafile admin to generate a user auth token
ENABLE_SYS_ADMIN_GENERATE_USER_AUTH_TOKEN = False
# allow search from LDAP directly during auto-completion (not only search imported users)
ENABLE_SEARCH_FROM_LDAP_DIRECTLY = False
# show traffic on the UI
SHOW_TRAFFIC = True
# show or hide library 'download' button
SHOW_REPO_DOWNLOAD_BUTTON = False
# enable 'upload folder' or not
ENABLE_UPLOAD_FOLDER = True
# enable resumable fileupload or not
ENABLE_RESUMABLE_FILEUPLOAD = False
RESUMABLE_UPLOAD_FILE_BLOCK_SIZE = 8
## maxNumberOfFiles for fileupload
MAX_NUMBER_OF_FILES_FOR_FILEUPLOAD = 1000
# enable encrypted libraries
ENABLE_ENCRYPTED_LIBRARY = True
ENCRYPTED_LIBRARY_VERSION = 2
# enable resetting an encrypted library's password when a user forgets it
ENABLE_RESET_ENCRYPTED_REPO_PASSWORD = False
# minimum length for the password of an encrypted library
REPO_PASSWORD_MIN_LENGTH = 8
# token length for the share link
SHARE_LINK_TOKEN_LENGTH = 20
# if enabled, only authenticated users can view/preview a share link
SHARE_LINK_LOGIN_REQUIRED = False
# min/max expire days for a share link
SHARE_LINK_EXPIRE_DAYS_MIN = 0 # 0 means no limit
SHARE_LINK_EXPIRE_DAYS_MAX = 0 # 0 means no limit
# default expire days should be
# greater than or equal to MIN and less than or equal to MAX
SHARE_LINK_EXPIRE_DAYS_DEFAULT = 0
# min/max expire days for an upload link
UPLOAD_LINK_EXPIRE_DAYS_MIN = 0 # 0 means no limit
UPLOAD_LINK_EXPIRE_DAYS_MAX = 0 # 0 means no limit
# default expire days should be
# greater than or equal to MIN and less than or equal to MAX
UPLOAD_LINK_EXPIRE_DAYS_DEFAULT = 0
# force use password when generate a share/upload link
SHARE_LINK_FORCE_USE_PASSWORD = False
# minimum length for the password of a share/upload link
SHARE_LINK_PASSWORD_MIN_LENGTH = 10
# LEVEL for the password of a share/upload link
# based on four types of input:
# num, upper letter, lower letter, other symbols
# '3' means password must have at least 3 types of the above.
SHARE_LINK_PASSWORD_STRENGTH_LEVEL = 1
# enable or disable share link audit
ENABLE_SHARE_LINK_AUDIT = False
# enable or disable report abuse file on share link page
ENABLE_SHARE_LINK_REPORT_ABUSE = False
# share link audit code timeout
SHARE_LINK_AUDIT_CODE_TIMEOUT = 60 * 60
# enable or disable limit ip
ENABLE_LIMIT_IPADDRESS = False
TRUSTED_IP_LIST = ['127.0.0.1']
# Control the language used to send email. Defaults to the user's current language.
SHARE_LINK_EMAIL_LANGUAGE = ''
# check for viruses in files uploaded via an upload link
ENABLE_UPLOAD_LINK_VIRUS_CHECK = False
# minimum length for a user's password
USER_PASSWORD_MIN_LENGTH = 6
# LEVEL based on four types of input:
# num, upper letter, lower letter, other symbols
# '3' means password must have at least 3 types of the above.
USER_PASSWORD_STRENGTH_LEVEL = 3
# default False, only check USER_PASSWORD_MIN_LENGTH
# when True, check password strength level, STRONG(or above) is allowed
USER_STRONG_PASSWORD_REQUIRED = False
# Force the user to change password when an admin adds or resets a user.
FORCE_PASSWORD_CHANGE = True
# Enable a user to change password in 'settings' page.
ENABLE_CHANGE_PASSWORD = True
# Enable a user to get auth token in 'settings' page.
ENABLE_GET_AUTH_TOKEN_BY_SESSION = False
ENABLE_DELETE_ACCOUNT = True
ENABLE_UPDATE_USER_INFO = True
# Enable or disable repo history setting
ENABLE_REPO_HISTORY_SETTING = True
DISABLE_SYNC_WITH_ANY_FOLDER = False
ENABLE_TERMS_AND_CONDITIONS = False
# Enable or disable sharing to all groups
ENABLE_SHARE_TO_ALL_GROUPS = False
# Enable or disable sharing to departments
ENABLE_SHARE_TO_DEPARTMENT = True
# interval for request unread notifications
UNREAD_NOTIFICATIONS_REQUEST_INTERVAL = 3 * 60 # seconds
# Enable file comments
ENABLE_FILE_COMMENT = True
# Enable seafile docs
ENABLE_SEAFILE_DOCS = False
# File preview
FILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024
FILE_ENCODING_LIST = ['auto', 'utf-8', 'gbk', 'ISO-8859-1', 'ISO-8859-5']
FILE_ENCODING_TRY_LIST = ['utf-8', 'gbk']
HIGHLIGHT_KEYWORD = False # If True, highlight the keywords in the file when the visit is via clicking a link in 'search result' page.
# extensions of previewed files
TEXT_PREVIEW_EXT = """ac, am, bat, c, cc, cmake, cpp, cs, css, diff, el, h, html, htm, java, js, json, less, make, org, php, pl, properties, py, rb, scala, script, sh, sql, txt, text, tex, vi, vim, xhtml, xml, log, csv, groovy, rst, patch, go, yml"""
# Common settings(file extension, storage) for avatar and group avatar.
AVATAR_FILE_STORAGE = '' # Replace with 'seahub.base.database_storage.DatabaseStorage' if save avatar files to database
AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png', '.jpeg', '.gif')
# Avatar
AVATAR_STORAGE_DIR = 'avatars'
AVATAR_HASH_USERDIRNAMES = True
AVATAR_HASH_FILENAMES = True
AVATAR_GRAVATAR_BACKUP = False
AVATAR_DEFAULT_URL = '/avatars/default.png'
AVATAR_DEFAULT_NON_REGISTERED_URL = '/avatars/default-non-register.jpg'
AVATAR_MAX_AVATARS_PER_USER = 1
AVATAR_CACHE_TIMEOUT = 14 * 24 * 60 * 60
AUTO_GENERATE_AVATAR_SIZES = (16, 20, 24, 28, 32, 36, 40, 42, 48, 60, 64, 72, 80, 84, 96, 128, 160)
# Group avatar
GROUP_AVATAR_STORAGE_DIR = 'avatars/groups'
GROUP_AVATAR_DEFAULT_URL = 'avatars/groups/default.png'
AUTO_GENERATE_GROUP_AVATAR_SIZES = (20, 24, 32, 36, 48, 56)
LOG_DIR = os.environ.get('SEAHUB_LOG_DIR', '/tmp')
CACHE_DIR = "/tmp"
install_topdir = os.path.expanduser(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
central_conf_dir = os.environ.get('SEAFILE_CENTRAL_CONF_DIR', '')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': os.path.join(CACHE_DIR, 'seahub_cache'),
'OPTIONS': {
'MAX_ENTRIES': 1000000
}
},
# Compatible with existing `COMPRESS_CACHE_BACKEND` setting after
# upgrading to django-compressor v2.2.
# ref: https://manual.seafile.com/deploy_pro/deploy_in_a_cluster.html
'django.core.cache.backends.locmem.LocMemCache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
# rest_framwork
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
],
'DEFAULT_THROTTLE_RATES': {
'ping': '3000/minute',
'anon': '60/minute',
'user': '3000/minute',
},
# https://github.com/tomchristie/django-rest-framework/issues/2891
'UNICODE_JSON': False,
}
REST_FRAMEWORK_THROTTING_WHITELIST = []
# file and path
GET_FILE_HISTORY_TIMEOUT = 10 * 60 # seconds
MAX_UPLOAD_FILE_NAME_LEN = 255
MAX_FILE_NAME = MAX_UPLOAD_FILE_NAME_LEN
MAX_PATH = 4096
FILE_LOCK_EXPIRATION_DAYS = 0
# Whether or not to activate a user when registration completes.
# If set to ``False``, the new user will be activated by an admin or via an activation link.
ACTIVATE_AFTER_REGISTRATION = True
# Whether or not to send an activation email to the user when registration completes.
# This option will be ignored if ``ACTIVATE_AFTER_REGISTRATION`` is set to ``True``.
REGISTRATION_SEND_MAIL = False
# Whether or not to send a notification email to system admins when a user registers or
# first logs in through Shibboleth.
NOTIFY_ADMIN_AFTER_REGISTRATION = False
# Whether or not to activate an inactive user on first login. Mainly used in LDAP user sync.
ACTIVATE_AFTER_FIRST_LOGIN = False
REQUIRE_DETAIL_ON_REGISTRATION = False
# Account initial password, for password resetting.
# INIT_PASSWD can either be a string, or a function (function has to be set without the brackets)
def genpassword():
from django.utils.crypto import get_random_string
return get_random_string(10)
INIT_PASSWD = genpassword
# browser tab title
SITE_TITLE = 'Private Seafile'
# html head meta tag for search engine preview text
SITE_DESCRIPTION = ''
# Base name used in email sending
SITE_NAME = 'Seafile'
# Path to the license file(relative to the media path)
LICENSE_PATH = os.path.join(PROJECT_ROOT, '../../seafile-license.txt')
# Path to the background image file of login page(relative to the media path)
LOGIN_BG_IMAGE_PATH = 'img/login-bg.jpg'
# Path to the favicon file (relative to the media path)
# tip: use a different name when modify it.
FAVICON_PATH = 'favicons/favicon.png'
APPLE_TOUCH_ICON_PATH = 'favicons/favicon.png'
# Path to the Logo Imagefile (relative to the media path)
LOGO_PATH = 'img/seafile-logo.png'
# logo size. the unit is 'px'
LOGO_WIDTH = ''
LOGO_HEIGHT = 32
CUSTOM_LOGO_PATH = 'custom/mylogo.png'
CUSTOM_FAVICON_PATH = 'custom/favicon.ico'
CUSTOM_LOGIN_BG_PATH = 'custom/login-bg.jpg'
# used before version 6.3: the relative path of css file under seahub-data (e.g. custom/custom.css)
BRANDING_CSS = ''
# used in 6.3+, enable setting custom css via admin web interface
ENABLE_BRANDING_CSS = False
# Use Django to serve static files. Set to `False` if deployed behind a web
# server.
SERVE_STATIC = True
# Enable or disable registration on web.
ENABLE_SIGNUP = False
# show 'log out' icon in top-bar or not.
SHOW_LOGOUT_ICON = False
# privacy policy link and service link
PRIVACY_POLICY_LINK = ''
TERMS_OF_SERVICE_LINK = ''
# For security consideration, please set to match the host/domain of your site, e.g., ALLOWED_HOSTS = ['.example.com'].
# Please refer https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts for details.
ALLOWED_HOSTS = ['*']
# Logging
LOGGING = {
'version': 1,
# Enable existing loggers so that gunicorn errors will be bubbled up when
# server side error page "Internal Server Error" occurs.
# ref: https://www.caktusgroup.com/blog/2015/01/27/Django-Logging-Configuration-logging_config-default-settings-logger/
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s:%(lineno)s %(funcName)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
'default': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOG_DIR, 'seahub.log'),
'maxBytes': 1024*1024*100, # 100 MB
'backupCount': 5,
'formatter': 'standard',
},
'onlyoffice_handler': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOG_DIR, 'onlyoffice.log'),
'maxBytes': 1024*1024*100, # 100 MB
'backupCount': 5,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'django.request': {
'handlers': ['default', 'mail_admins'],
'level': 'INFO',
'propagate': False
},
'py.warnings': {
'handlers': ['console', ],
'level': 'INFO',
'propagate': False
},
'onlyoffice': {
'handlers': ['onlyoffice_handler', ],
'level': 'INFO',
'propagate': False
},
}
}
#Login Attempt
LOGIN_ATTEMPT_LIMIT = 5
LOGIN_ATTEMPT_TIMEOUT = 15 * 60 # in seconds (default: 15 minutes)
FREEZE_USER_ON_LOGIN_FAILED = False # deactivate user account when login attempts exceed limit
# Age of cookie, in seconds (default: 1 day).
SESSION_COOKIE_AGE = 24 * 60 * 60
# Days of remembered login info (default: 7 days)
LOGIN_REMEMBER_DAYS = 7
SEAFILE_VERSION = '6.3.3'
CAPTCHA_IMAGE_SIZE = (90, 42)
###################
# Image Thumbnail #
###################
# Absolute filesystem path to the directory that will hold thumbnail files.
SEAHUB_DATA_ROOT = os.path.join(PROJECT_ROOT, '../../seahub-data')
if os.path.exists(SEAHUB_DATA_ROOT):
THUMBNAIL_ROOT = os.path.join(SEAHUB_DATA_ROOT, 'thumbnail')
else:
THUMBNAIL_ROOT = os.path.join(PROJECT_ROOT, 'seahub/thumbnail/thumb')
THUMBNAIL_EXTENSION = 'png'
# for thumbnail: height(px) and width(px)
THUMBNAIL_DEFAULT_SIZE = 48
THUMBNAIL_SIZE_FOR_GRID = 192
THUMBNAIL_SIZE_FOR_ORIGINAL = 1024
# size(MB) limit for generate thumbnail
THUMBNAIL_IMAGE_SIZE_LIMIT = 30
THUMBNAIL_IMAGE_ORIGINAL_SIZE_LIMIT = 256
# video thumbnails
ENABLE_VIDEO_THUMBNAIL = False
THUMBNAIL_VIDEO_FRAME_TIME = 5 # use the frame at 5 second as thumbnail
# template for create new office file
OFFICE_TEMPLATE_ROOT = os.path.join(MEDIA_ROOT, 'office-template')
ENABLE_WEBDAV_SECRET = False
WEBDAV_SECRET_MIN_LENGTH = 1
WEBDAV_SECRET_STRENGTH_LEVEL = 1
ENABLE_USER_SET_CONTACT_EMAIL = False
#####################
# Global AddressBook #
#####################
ENABLE_GLOBAL_ADDRESSBOOK = True
ENABLE_ADDRESSBOOK_OPT_IN = False
####################
# Guest Invite #
####################
ENABLE_GUEST_INVITATION = False
INVITATION_ACCEPTER_BLACKLIST = []
########################
# Security Enhancements #
########################
ENABLE_SUDO_MODE = True
FILESERVER_TOKEN_ONCE_ONLY = True
#################
# Email sending #
#################
SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = True # Whether to send email when a system staff adds a new member.
SEND_EMAIL_ON_RESETTING_USER_PASSWD = True # Whether to send email when a system staff resets a user's password.
##########################
# Settings for Extra App #
##########################
##########################
# Settings for frontend #
##########################
SEAFILE_COLLAB_SERVER = ''
##########################
# Settings for dtable web #
##########################
DTABLE_WEB_SERVER = ''
############################
# Settings for Seahub Priv #
############################
# Replace the From address in outgoing email with the current user's instead of the email sender.
REPLACE_FROM_EMAIL = False
# Add ``Reply-to`` header, see RFC #822.
ADD_REPLY_TO_HEADER = False
ENABLE_DEMO_USER = False
CLOUD_DEMO_USER = '[email protected]'
ENABLE_TWO_FACTOR_AUTH = False
OTP_LOGIN_URL = '/profile/two_factor_authentication/setup/'
TWO_FACTOR_DEVICE_REMEMBER_DAYS = 90
ENABLE_FORCE_2FA_TO_ALL_USERS = False
# Enable wiki
ENABLE_WIKI = True
# Enable 'repo snapshot label' feature
ENABLE_REPO_SNAPSHOT_LABEL = False
# Repo wiki mode
ENABLE_REPO_WIKI_MODE = True
############################
# HU berlin additional #
############################
# ADDITIONAL_SHARE_DIALOG_NOTE = {
# 'title': 'Attention! Read before sharing files:',
# 'content': 'Do not share personal or confidential official data with **.'
# }
ADDITIONAL_SHARE_DIALOG_NOTE = None
# ADDITIONAL_APP_BOTTOM_LINKS = {
# 'seafile': 'http://dev.seahub.com/seahub',
# 'dtable-web': 'http://dev.seahub.com/dtable-web'
# }
ADDITIONAL_APP_BOTTOM_LINKS = None
# ADDITIONAL_ABOUT_DIALOG_LINKS = {
# 'seafile': 'http://dev.seahub.com/seahub',
# 'dtable-web': 'http://dev.seahub.com/dtable-web'
# }
ADDITIONAL_ABOUT_DIALOG_LINKS = None
############################
# Settings for SeafileDocs #
############################
if os.environ.get('SEAFILE_DOCS', None):
LOGO_PATH = 'img/seafile-docs-logo.png'
LOGO_WIDTH = ''
ENABLE_WIKI = True
d = os.path.dirname
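# The nested d(...) calls below walk several directory levels up from this settings
# file toward the deployment root, where conf/seafevents.conf is expected to live
# (the exact depth depends on the install layout); if the file does not exist the
# setting is removed again just below.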
EVENTS_CONFIG_FILE = os.environ.get(
'EVENTS_CONFIG_FILE',
os.path.join(
d(d(d(d(os.path.abspath(__file__))))), 'conf', 'seafevents.conf'
)
)
del d
if not os.path.exists(EVENTS_CONFIG_FILE):
del EVENTS_CONFIG_FILE
#####################
# External settings #
#####################
def load_local_settings(module):
'''Import any symbols that begin with A-Z. Append to lists any symbols
that begin with "EXTRA_".
'''
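# Illustrative example (hypothetical names): a local settings module defining
#   EXTRA_INSTALLED_APPS = ['my_extra_app']
# gets 'my_extra_app' appended to INSTALLED_APPS, while a plain upper-case symbol
# such as SITE_NAME = 'My Site' simply overrides the default declared above.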
if hasattr(module, 'HTTP_SERVER_ROOT'):
if not hasattr(module, 'FILE_SERVER_ROOT'):
module.FILE_SERVER_ROOT = module.HTTP_SERVER_ROOT
del module.HTTP_SERVER_ROOT
for attr in dir(module):
match = re.search(r'^EXTRA_(\w+)', attr)
if match:
name = match.group(1)
value = getattr(module, attr)
try:
globals()[name] += value
except KeyError:
globals()[name] = value
elif re.search('^[A-Z]', attr):
globals()[attr] = getattr(module, attr)
# Load local_settings.py
try:
import seahub.local_settings
except ImportError:
pass
else:
load_local_settings(seahub.local_settings)
del seahub.local_settings
# Load seahub_settings.py in server release
try:
if os.path.exists(central_conf_dir):
sys.path.insert(0, central_conf_dir)
import seahub_settings
except ImportError:
pass
else:
# In server release, sqlite3 db file is <topdir>/seahub.db
DATABASES['default']['NAME'] = os.path.join(install_topdir, 'seahub.db')
# In server release, gunicorn is used to deploy seahub
INSTALLED_APPS.append('gunicorn')
load_local_settings(seahub_settings)
del seahub_settings
# Remove install_topdir from path
sys.path.pop(0)
# The following settings are private and cannot be overwritten.
INNER_FILE_SERVER_ROOT = 'http://127.0.0.1:' + FILE_SERVER_PORT
CONSTANCE_ENABLED = ENABLE_SETTINGS_VIA_WEB
CONSTANCE_CONFIG = {
'SERVICE_URL': (SERVICE_URL, ''),
'FILE_SERVER_ROOT': (FILE_SERVER_ROOT, ''),
'DISABLE_SYNC_WITH_ANY_FOLDER': (DISABLE_SYNC_WITH_ANY_FOLDER, ''),
'ENABLE_SIGNUP': (ENABLE_SIGNUP, ''),
'ACTIVATE_AFTER_REGISTRATION': (ACTIVATE_AFTER_REGISTRATION, ''),
'REGISTRATION_SEND_MAIL': (REGISTRATION_SEND_MAIL, ''),
'LOGIN_REMEMBER_DAYS': (LOGIN_REMEMBER_DAYS, ''),
'LOGIN_ATTEMPT_LIMIT': (LOGIN_ATTEMPT_LIMIT, ''),
'FREEZE_USER_ON_LOGIN_FAILED': (FREEZE_USER_ON_LOGIN_FAILED, ''),
'ENABLE_ENCRYPTED_LIBRARY': (ENABLE_ENCRYPTED_LIBRARY, ''),
'REPO_PASSWORD_MIN_LENGTH': (REPO_PASSWORD_MIN_LENGTH, ''),
'ENABLE_REPO_HISTORY_SETTING': (ENABLE_REPO_HISTORY_SETTING, ''),
'FORCE_PASSWORD_CHANGE': (FORCE_PASSWORD_CHANGE, ''),
'USER_STRONG_PASSWORD_REQUIRED': (USER_STRONG_PASSWORD_REQUIRED, ''),
'USER_PASSWORD_MIN_LENGTH': (USER_PASSWORD_MIN_LENGTH, ''),
'USER_PASSWORD_STRENGTH_LEVEL': (USER_PASSWORD_STRENGTH_LEVEL, ''),
'SHARE_LINK_TOKEN_LENGTH': (SHARE_LINK_TOKEN_LENGTH, ''),
'SHARE_LINK_FORCE_USE_PASSWORD': (SHARE_LINK_FORCE_USE_PASSWORD, ''),
'SHARE_LINK_PASSWORD_MIN_LENGTH': (SHARE_LINK_PASSWORD_MIN_LENGTH, ''),
'SHARE_LINK_PASSWORD_STRENGTH_LEVEL': (SHARE_LINK_PASSWORD_STRENGTH_LEVEL, ''),
'ENABLE_TWO_FACTOR_AUTH': (ENABLE_TWO_FACTOR_AUTH, ''),
'TEXT_PREVIEW_EXT': (TEXT_PREVIEW_EXT, ''),
'ENABLE_SHARE_TO_ALL_GROUPS': (ENABLE_SHARE_TO_ALL_GROUPS, ''),
'SITE_NAME': (SITE_NAME, ''),
'SITE_TITLE': (SITE_TITLE, ''),
'ENABLE_BRANDING_CSS': (ENABLE_BRANDING_CSS, ''),
'CUSTOM_CSS': ('', ''),
'ENABLE_TERMS_AND_CONDITIONS': (ENABLE_TERMS_AND_CONDITIONS, ''),
'ENABLE_USER_CLEAN_TRASH': (ENABLE_USER_CLEAN_TRASH, ''),
}
# If the Seafile admin enables remote user authentication in conf/seahub_settings.py,
# then add 'seahub.auth.middleware.SeafileRemoteUserMiddleware' and
# 'seahub.auth.backends.SeafileRemoteUserBackend' to settings.
if ENABLE_REMOTE_USER_AUTHENTICATION:
MIDDLEWARE.append('seahub.auth.middleware.SeafileRemoteUserMiddleware')
AUTHENTICATION_BACKENDS += ('seahub.auth.backends.SeafileRemoteUserBackend',)
if ENABLE_OAUTH or ENABLE_WORK_WEIXIN or ENABLE_WEIXIN or ENABLE_DINGTALK:
AUTHENTICATION_BACKENDS += ('seahub.oauth.backends.OauthRemoteUserBackend',)
if ENABLE_CAS:
AUTHENTICATION_BACKENDS += ('seahub.django_cas_ng.backends.CASBackend',)
if ENABLE_ADFS_LOGIN:
AUTHENTICATION_BACKENDS += ('seahub.adfs_auth.backends.Saml2Backend',)
#####################
# Custom Nav Items #
#####################
# an example:
# CUSTOM_NAV_ITEMS = [
# {'icon': 'sf2-icon-star',
# 'desc': 'test custom name',
# 'link': 'http://127.0.0.1:8000/shared-libs/',
# },
# ]
| []
| []
| [
"SEAFILE_CENTRAL_CONF_DIR",
"SEAFILE_DOCS",
"SEAHUB_LOG_DIR",
"EVENTS_CONFIG_FILE"
]
| [] | ["SEAFILE_CENTRAL_CONF_DIR", "SEAFILE_DOCS", "SEAHUB_LOG_DIR", "EVENTS_CONFIG_FILE"] | python | 4 | 0 | |
river/ensemble/__init__.py | """Ensemble learning.
Broadly speaking, there are two kinds of ensemble approaches. There are those that copy a single
model several times and aggregate the predictions of said copies. This includes bagging as well as
boosting. Then there are those that are composed of an arbitrary list of models, and can therefore
aggregate predictions from different kinds of models.
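
For example (an illustrative sketch, not part of this module; the constructor
arguments follow the documented river API and may differ between versions), a
bagging ensemble copies a single base model, while a voting ensemble wraps an
arbitrary list of heterogeneous models::

    from river import ensemble, linear_model, tree

    # copy-based ensemble: several copies of one logistic regression
    bagging = ensemble.BaggingClassifier(
        model=linear_model.LogisticRegression(), n_models=5, seed=42
    )

    # heterogeneous ensemble: an arbitrary list of different model types
    voting = ensemble.VotingClassifier(
        models=[linear_model.LogisticRegression(), tree.HoeffdingTreeClassifier()]
    )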
"""
from .adaptive_random_forest import (
AdaptiveRandomForestClassifier,
AdaptiveRandomForestRegressor,
)
from .bagging import (
ADWINBaggingClassifier,
BaggingClassifier,
BaggingRegressor,
LeveragingBaggingClassifier,
)
from .boosting import AdaBoostClassifier
from .ewa import EWARegressor
from .stacking import StackingClassifier
from .streaming_random_patches import SRPClassifier, SRPRegressor
from .voting import VotingClassifier
__all__ = [
"AdaptiveRandomForestClassifier",
"AdaptiveRandomForestRegressor",
"AdaBoostClassifier",
"ADWINBaggingClassifier",
"BaggingClassifier",
"BaggingRegressor",
"EWARegressor",
"LeveragingBaggingClassifier",
"SRPClassifier",
"SRPRegressor",
"StackingClassifier",
"VotingClassifier",
]
| []
| []
| []
| [] | [] | python | null | null | null |
src/fuse_nfs_cert_test.go | package fuse_nfs_certs
import (
"os/exec"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagertest"
"path/filepath"
"syscall"
"fmt"
"os"
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
PCAP uint32 = 200
)
var _ = Describe("Certify with: ", func() {
var (
testLogger lager.Logger
err error
output []byte
source string
mountPoint string
pcapMountPath string
rootMountPath string
filename string
)
BeforeEach(func() {
testLogger = lagertest.NewTestLogger("MainTest")
source = os.Getenv("FUSE_MOUNT")
Expect(source).NotTo(Equal(""))
mountPoint = os.Getenv("NFS_MOUNT")
Expect(source).NotTo(Equal(""))
filename = randomString(10)
})
Context("given a pcap user with uid:gid 200:200", func() {
BeforeEach(func() {
output, err = asRoot(testLogger, "groupadd", "-g", fmt.Sprintf("%d", PCAP), "pcap")
Expect(err).NotTo(HaveOccurred())
output, err = asRoot(testLogger, "useradd", "-u", fmt.Sprintf("%d", PCAP), "-g", fmt.Sprintf("%d", PCAP), "pcap")
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
output, err = asRoot(testLogger, "userdel", "pcap")
output, err = asRoot(testLogger, "groupdel", "pcap")
})
Context("given a fuse-nfs mount mapping pcap user to uid:gid 3000:3050", func() {
BeforeEach(func() {
// pcap mount
pcapMountPath = filepath.Join("/tmp", "fuse_nfs_certs")
output, err = asUser(testLogger, PCAP, PCAP, "mkdir", "-p", pcapMountPath)
Expect(err).NotTo(HaveOccurred())
output, err = asUser(testLogger, PCAP, PCAP, "fuse-nfs", "-n", source, "-m", pcapMountPath)
Expect(err).NotTo(HaveOccurred())
// root mount
rootMountPath = filepath.Join("/tmp", "fuse_nfs_certs_root")
output, err = asRoot(testLogger, "mkdir", "-p", rootMountPath)
Expect(err).NotTo(HaveOccurred())
output, err = asRoot(testLogger, "mount", "-t", "nfs", "-o", "nfsvers=3,nolock", mountPoint, rootMountPath)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
output, err = asUser(testLogger, PCAP, PCAP, "rm", filepath.Join(pcapMountPath, filename))
Expect(err).NotTo(HaveOccurred())
output, err = asRoot(testLogger, "umount", rootMountPath)
Expect(err).NotTo(HaveOccurred())
err = os.Remove(rootMountPath)
Expect(err).ToNot(HaveOccurred())
output, err = asRoot(testLogger, "umount", "-f", pcapMountPath)
Expect(err).NotTo(HaveOccurred())
err = os.Remove(pcapMountPath)
Expect(err).ToNot(HaveOccurred())
})
It("successfully creates a file with uid:gid pcap:pcap", func() {
output, err = asUser(testLogger, PCAP, PCAP, "touch", filepath.Join(pcapMountPath, filename))
Expect(err).NotTo(HaveOccurred())
output, err = asUser(testLogger, PCAP, PCAP, "stat", "-c", "%u:%g", filepath.Join(pcapMountPath, filename))
Expect(err).NotTo(HaveOccurred())
Expect(string(output)).To(Equal("200:200\n"))
output, err = asUser(testLogger, PCAP, PCAP, "stat", "-c", "%u:%g", filepath.Join(rootMountPath, filename))
Expect(err).NotTo(HaveOccurred())
Expect(string(output)).To(Equal("3000:3050\n"))
})
})
})
})
func asUser(logger lager.Logger, uid, gid uint32, cmd string, args ...string) ([]byte, error) {
logger.Info(fmt.Sprintf("Executing command %s %#v", cmd, args))
cmdHandle := exec.Command(cmd, args...)
attrs := syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: uid,
Gid: gid,
},
}
cmdHandle.SysProcAttr = &attrs
output, err := cmdHandle.CombinedOutput()
if err != nil {
logger.Error(string(output), err)
}
return output, err
}
func asRoot(logger lager.Logger, cmd string, args ...string) ([]byte, error) {
return asUser(logger, 0, 0, cmd, args...)
}
func randomString(n int) string {
runes := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, n)
for i := range b {
b[i] = runes[rand.Intn(len(runes))]
}
return string(b)
}
| [
"\"FUSE_MOUNT\"",
"\"NFS_MOUNT\""
]
| []
| [
"NFS_MOUNT",
"FUSE_MOUNT"
]
| [] | ["NFS_MOUNT", "FUSE_MOUNT"] | go | 2 | 0 | |
test/kernel/integration/memmap/test.py | #! /usr/bin/python
import sys
import os
includeos_src = os.environ.get('INCLUDEOS_SRC',
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src + "/test")
import vmrunner
def test2():
print "Booting VM 2 - lots of memory";
vmrunner.vms[1].boot(20)
vm = vmrunner.vms[0];
vm.make().on_exit_success(test2);
print "Booting VM 1 - default amount of memory"
vm.boot(20)
| []
| []
| [
"INCLUDEOS_SRC"
]
| [] | ["INCLUDEOS_SRC"] | python | 1 | 0 | |
keras/functional.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# load data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape)
# Flatten the dataset for NN consumption
# Original shape: (60000, 28, 28)
# -1: infer this dimension (it remains 60000, the number of samples)
# 28*28 = flatten dimensions 1 and 2 into a single 784-value feature vector
# astype("float32") = cast to the standard TensorFlow dtype to reduce memory use
# /255.0 = normalize pixel values from the 0-255 range to 0-1 for faster, more stable training
x_train = x_train.reshape(-1, 28 * 28).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28 * 28).astype("float32") / 255.0
print(x_train.shape)
# Soft activation, can be automated later
# x_train = tf.convert_to_tensor(x_train)
# Create model: Functional API
# multiple inputs - multiple outputs
inputs = keras.Input(shape=(784))
x = layers.Dense(512, activation='relu', name='first_layer')(inputs) # initialize layer, then call it with the inputs
x = layers.Dense(512, activation='relu', name='second_layer')(x)
# x = layers.Dense(512, activation='relu', name='third_layer')(x)
# x = layers.Dense(512, activation='relu', name='fourth_layer')(x)
outputs = layers.Dense(10, activation='softmax', name='output_layer')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Show nn information
print(model.summary())
# SGD optimizer options
# lr_schedule = keras.optimizers.schedules.ExponentialDecay(
# initial_learning_rate=1e-2,
# decay_steps=10000,
# decay_rate=0.9)
# Tell keras how to train the model
model.compile(
# Loss function to evaluate performance
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(),
# optimizer=keras.optimizers.RMSprop(learning_rate=0.001),
# optimizer=keras.optimizers.SGD(learning_rate=lr_schedule),
metrics=['accuracy']
)
# Training of the nn
model.fit(x_train, y_train, batch_size=32, epochs=6, verbose=2) # verbose=2: prints after each epoch
# Evaluate nn
model.evaluate(x_test, y_test, batch_size=32, verbose=2)
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
controllers/sriovnetworknodepolicy_controller.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"github.com/go-logr/logr"
dptypes "github.com/intel/sriov-network-device-plugin/pkg/types"
errs "github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
"github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/apply"
render "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/render"
)
// SriovNetworkNodePolicyReconciler reconciles a SriovNetworkNodePolicy object
type SriovNetworkNodePolicyReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
var ctlrlogger = logf.Log.WithName("SriovNetworkNodePolicyController")
// +kubebuilder:rbac:groups=sriovnetwork.openshift.io,resources=sriovnetworknodepolicies,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=sriovnetwork.openshift.io,resources=sriovnetworknodepolicies/status,verbs=get;update;patch
func (r *SriovNetworkNodePolicyReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
reqLogger := r.Log.WithValues("sriovnetworknodepolicy", req.NamespacedName)
reqLogger.Info("Reconciling")
defaultPolicy := &sriovnetworkv1.SriovNetworkNodePolicy{}
err := r.Get(context.TODO(), types.NamespacedName{Name: DEFAULT_POLICY_NAME, Namespace: namespace}, defaultPolicy)
if err != nil {
if errors.IsNotFound(err) {
// Default policy object not found, create it.
defaultPolicy.SetNamespace(namespace)
defaultPolicy.SetName(DEFAULT_POLICY_NAME)
defaultPolicy.Spec = sriovnetworkv1.SriovNetworkNodePolicySpec{
NumVfs: 0,
NodeSelector: make(map[string]string),
NicSelector: sriovnetworkv1.SriovNetworkNicSelector{},
}
err = r.Create(context.TODO(), defaultPolicy)
if err != nil {
reqLogger.Error(err, "Failed to create default Policy", "Namespace", namespace, "Name", DEFAULT_POLICY_NAME)
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
// Fetch the SriovNetworkNodePolicyList
policyList := &sriovnetworkv1.SriovNetworkNodePolicyList{}
err = r.List(context.TODO(), policyList, &client.ListOptions{})
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
// Fetch the Nodes
nodeList := &corev1.NodeList{}
lo := &client.MatchingLabels{}
defaultOpConf := &sriovnetworkv1.SriovOperatorConfig{}
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: DEFAULT_CONFIG_NAME}, defaultOpConf); err != nil {
return reconcile.Result{}, err
}
if len(defaultOpConf.Spec.ConfigDaemonNodeSelector) > 0 {
labels := client.MatchingLabels(defaultOpConf.Spec.ConfigDaemonNodeSelector)
lo = &labels
} else {
lo = &client.MatchingLabels{
"node-role.kubernetes.io/worker": "",
"beta.kubernetes.io/os": "linux",
}
}
err = r.List(context.TODO(), nodeList, lo)
if err != nil {
// Error reading the object - requeue the request.
reqLogger.Error(err, "Fail to list nodes")
return reconcile.Result{}, err
}
// Sort the policies by priority; higher-priority ones are applied later
sort.Sort(sriovnetworkv1.ByPriority(policyList.Items))
// Sync Sriov device plugin ConfigMap object
if err = r.syncDevicePluginConfigMap(policyList, nodeList); err != nil {
return reconcile.Result{}, err
}
// Render and sync Daemon objects
if err = r.syncPluginDaemonObjs(defaultPolicy, policyList); err != nil {
return reconcile.Result{}, err
}
// Sync SriovNetworkNodeState objects
if err = r.syncAllSriovNetworkNodeStates(defaultPolicy, policyList, nodeList); err != nil {
return reconcile.Result{}, err
}
// All was successful. Request that this be re-triggered after ResyncPeriod,
// so we can reconcile state again.
return reconcile.Result{RequeueAfter: ResyncPeriod}, nil
}
func (r *SriovNetworkNodePolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&sriovnetworkv1.SriovNetworkNodePolicy{}).
Owns(&appsv1.DaemonSet{}).
Owns(&sriovnetworkv1.SriovNetworkNodeState{}).
Complete(r)
}
func (r *SriovNetworkNodePolicyReconciler) syncDevicePluginConfigMap(pl *sriovnetworkv1.SriovNetworkNodePolicyList, nl *corev1.NodeList) error {
logger := r.Log.WithName("syncDevicePluginConfigMap")
logger.Info("Start to sync device plugin ConfigMap")
configData := make(map[string]string)
for _, node := range nl.Items {
data, err := r.renderDevicePluginConfigData(pl, &node)
if err != nil {
return err
}
config, err := json.Marshal(data)
if err != nil {
return err
}
configData[node.Name] = string(config)
}
cm := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: CONFIGMAP_NAME,
Namespace: namespace,
},
Data: configData,
}
found := &corev1.ConfigMap{}
err := r.Get(context.TODO(), types.NamespacedName{Namespace: cm.Namespace, Name: cm.Name}, found)
if err != nil {
if errors.IsNotFound(err) {
err = r.Create(context.TODO(), cm)
if err != nil {
return fmt.Errorf("Couldn't create ConfigMap: %v", err)
}
logger.Info("Created ConfigMap for", cm.Namespace, cm.Name)
} else {
return fmt.Errorf("Failed to get ConfigMap: %v", err)
}
} else {
logger.Info("ConfigMap already exists, updating")
err = r.Update(context.TODO(), cm)
if err != nil {
return fmt.Errorf("Couldn't update ConfigMap: %v", err)
}
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) syncAllSriovNetworkNodeStates(np *sriovnetworkv1.SriovNetworkNodePolicy, npl *sriovnetworkv1.SriovNetworkNodePolicyList, nl *corev1.NodeList) error {
logger := r.Log.WithName("syncAllSriovNetworkNodeStates")
logger.Info("Start to sync all SriovNetworkNodeState custom resource")
found := &corev1.ConfigMap{}
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: CONFIGMAP_NAME}, found); err != nil {
logger.Info("Fail to get", "ConfigMap", CONFIGMAP_NAME)
}
for _, node := range nl.Items {
logger.Info("Sync SriovNetworkNodeState CR", "name", node.Name)
ns := &sriovnetworkv1.SriovNetworkNodeState{}
ns.Name = node.Name
ns.Namespace = namespace
j, _ := json.Marshal(ns)
logger.Info("SriovNetworkNodeState CR", "content", j)
if err := r.syncSriovNetworkNodeState(np, npl, ns, &node, found.GetResourceVersion()); err != nil {
logger.Error(err, "Fail to sync", "SriovNetworkNodeState", ns.Name)
return err
}
}
logger.Info("Remove SriovNetworkNodeState custom resource for unselected node")
nsList := &sriovnetworkv1.SriovNetworkNodeStateList{}
err := r.List(context.TODO(), nsList, &client.ListOptions{})
if err != nil {
if !errors.IsNotFound(err) {
logger.Info("Fail to list SriovNetworkNodeState CRs")
return err
}
} else {
for _, ns := range nsList.Items {
found := false
for _, node := range nl.Items {
logger.Info("validate", "SriovNetworkNodeState", ns.GetName(), "node", node.GetName())
if ns.GetName() == node.GetName() {
found = true
break
}
}
if !found {
err := r.Delete(context.TODO(), &ns, &client.DeleteOptions{})
if err != nil {
logger.Info("Fail to Delete", "SriovNetworkNodeState CR:", ns.GetName())
return err
}
}
}
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) syncSriovNetworkNodeState(np *sriovnetworkv1.SriovNetworkNodePolicy, npl *sriovnetworkv1.SriovNetworkNodePolicyList, ns *sriovnetworkv1.SriovNetworkNodeState, node *corev1.Node, cksum string) error {
logger := r.Log.WithName("syncSriovNetworkNodeState")
logger.Info("Start to sync SriovNetworkNodeState", "Name", ns.Name, "cksum", cksum)
if err := controllerutil.SetControllerReference(np, ns, r.Scheme); err != nil {
return err
}
found := &sriovnetworkv1.SriovNetworkNodeState{}
err := r.Get(context.TODO(), types.NamespacedName{Namespace: ns.Namespace, Name: ns.Name}, found)
if err != nil {
logger.Info("Fail to get SriovNetworkNodeState", "namespace", ns.Namespace, "name", ns.Name)
if errors.IsNotFound(err) {
ns.Spec.DpConfigVersion = cksum
err = r.Create(context.TODO(), ns)
if err != nil {
return fmt.Errorf("Couldn't create SriovNetworkNodeState: %v", err)
}
logger.Info("Created SriovNetworkNodeState for", ns.Namespace, ns.Name)
} else {
return fmt.Errorf("Failed to get SriovNetworkNodeState: %v", err)
}
} else {
logger.Info("SriovNetworkNodeState already exists, updating")
newVersion := found.DeepCopy()
newVersion.Spec = ns.Spec
// Previous Policy Priority(ppp) records the priority of previous evaluated policy in node policy list.
// Since node policy list is already sorted with priority number, comparing current priority with ppp shall
// be sufficient.
// ppp is set to 100 as initial value to avoid matching with the first policy in policy list, although
// it should not matter since the flag used in p.Apply() will only be applied when VF partition is detected.
ppp := 100
for _, p := range npl.Items {
if p.Name == "default" {
continue
}
if p.Selected(node) {
logger.Info("apply", "policy", p.Name, "node", node.Name)
// Merging only for policies with the same priority (ppp == p.Spec.Priority)
// This boolean flag controls merging of PF configuration (e.g. mtu, numvfs etc)
// when VF partition is configured.
p.Apply(newVersion, bool(ppp == p.Spec.Priority))
// record the evaluated policy priority for next loop
ppp = p.Spec.Priority
}
}
newVersion.Spec.DpConfigVersion = cksum
if equality.Semantic.DeepDerivative(newVersion.Spec, found.Spec) {
logger.Info("SriovNetworkNodeState did not change, not updating")
return nil
}
err = r.Update(context.TODO(), newVersion)
if err != nil {
return fmt.Errorf("Couldn't update SriovNetworkNodeState: %v", err)
}
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) syncPluginDaemonObjs(dp *sriovnetworkv1.SriovNetworkNodePolicy, pl *sriovnetworkv1.SriovNetworkNodePolicyList) error {
logger := r.Log.WithName("syncPluginDaemonObjs")
logger.Info("Start to sync sriov daemons objects")
// render RawCNIConfig manifests
data := render.MakeRenderData()
data.Data["Namespace"] = namespace
data.Data["SRIOVCNIImage"] = os.Getenv("SRIOV_CNI_IMAGE")
data.Data["SRIOVInfiniBandCNIImage"] = os.Getenv("SRIOV_INFINIBAND_CNI_IMAGE")
data.Data["SRIOVDevicePluginImage"] = os.Getenv("SRIOV_DEVICE_PLUGIN_IMAGE")
data.Data["ReleaseVersion"] = os.Getenv("RELEASEVERSION")
data.Data["ResourcePrefix"] = os.Getenv("RESOURCE_PREFIX")
envCniBinPath := os.Getenv("SRIOV_CNI_BIN_PATH")
if envCniBinPath == "" {
data.Data["CNIBinPath"] = "/var/lib/cni/bin"
} else {
logger.Info("New cni bin found", "CNIBinPath", envCniBinPath)
data.Data["CNIBinPath"] = envCniBinPath
}
objs, err := renderDsForCR(PLUGIN_PATH, &data)
if err != nil {
logger.Error(err, "Fail to render SR-IoV manifests")
return err
}
defaultConfig := &sriovnetworkv1.SriovOperatorConfig{}
err = r.Get(context.TODO(), types.NamespacedName{
Name: DEFAULT_CONFIG_NAME, Namespace: namespace}, defaultConfig)
if err != nil {
return err
}
if len(pl.Items) < 2 {
for _, obj := range objs {
err := r.deleteK8sResource(obj)
if err != nil {
return err
}
}
return nil
}
// Sync DaemonSets
for _, obj := range objs {
if obj.GetKind() == "DaemonSet" && len(defaultConfig.Spec.ConfigDaemonNodeSelector) > 0 {
scheme := kscheme.Scheme
ds := &appsv1.DaemonSet{}
err = scheme.Convert(obj, ds, nil)
if err != nil {
logger.Error(err, "Fail to convert to DaemonSet")
return err
}
ds.Spec.Template.Spec.NodeSelector = defaultConfig.Spec.ConfigDaemonNodeSelector
err = scheme.Convert(ds, obj, nil)
if err != nil {
logger.Error(err, "Fail to convert to Unstructured")
return err
}
}
err = r.syncDsObject(dp, pl, obj)
if err != nil {
logger.Error(err, "Couldn't sync SR-IoV daemons objects")
return err
}
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) deleteK8sResource(in *uns.Unstructured) error {
if err := apply.DeleteObject(context.TODO(), r, in); err != nil {
return fmt.Errorf("failed to delete object %v with err: %v", in, err)
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) syncDsObject(dp *sriovnetworkv1.SriovNetworkNodePolicy, pl *sriovnetworkv1.SriovNetworkNodePolicyList, obj *uns.Unstructured) error {
logger := r.Log.WithName("syncDsObject")
kind := obj.GetKind()
logger.Info("Start to sync Objects", "Kind", kind)
switch kind {
case "ServiceAccount", "Role", "RoleBinding":
if err := controllerutil.SetControllerReference(dp, obj, r.Scheme); err != nil {
return err
}
if err := apply.ApplyObject(context.TODO(), r, obj); err != nil {
logger.Error(err, "Fail to sync", "Kind", kind)
return err
}
case "DaemonSet":
ds := &appsv1.DaemonSet{}
err := r.Scheme.Convert(obj, ds, nil)
r.syncDaemonSet(dp, pl, ds)
if err != nil {
logger.Error(err, "Fail to sync DaemonSet", "Namespace", ds.Namespace, "Name", ds.Name)
return err
}
}
return nil
}
func (r *SriovNetworkNodePolicyReconciler) syncDaemonSet(cr *sriovnetworkv1.SriovNetworkNodePolicy, pl *sriovnetworkv1.SriovNetworkNodePolicyList, in *appsv1.DaemonSet) error {
logger := r.Log.WithName("syncDaemonSet")
logger.Info("Start to sync DaemonSet", "Namespace", in.Namespace, "Name", in.Name)
var err error
if pl != nil {
if err = setDsNodeAffinity(pl, in); err != nil {
return err
}
}
if err = controllerutil.SetControllerReference(cr, in, r.Scheme); err != nil {
return err
}
ds := &appsv1.DaemonSet{}
err = r.Get(context.TODO(), types.NamespacedName{Namespace: in.Namespace, Name: in.Name}, ds)
if err != nil {
if errors.IsNotFound(err) {
logger.Info("Created DaemonSet", in.Namespace, in.Name)
err = r.Create(context.TODO(), in)
if err != nil {
logger.Error(err, "Fail to create Daemonset", "Namespace", in.Namespace, "Name", in.Name)
return err
}
} else {
logger.Error(err, "Fail to get Daemonset", "Namespace", in.Namespace, "Name", in.Name)
return err
}
} else {
logger.Info("DaemonSet already exists, updating")
// DeepDerivative checks for changes only comparing non zero fields in the source struct.
// This skips default values added by the api server.
// References in https://github.com/kubernetes-sigs/kubebuilder/issues/592#issuecomment-625738183
if equality.Semantic.DeepDerivative(in.Spec, ds.Spec) {
logger.Info("Daemonset spec did not change, not updating")
return nil
}
err = r.Update(context.TODO(), in)
if err != nil {
logger.Error(err, "Fail to update DaemonSet", "Namespace", in.Namespace, "Name", in.Name)
return err
}
}
return nil
}
func setDsNodeAffinity(pl *sriovnetworkv1.SriovNetworkNodePolicyList, ds *appsv1.DaemonSet) error {
terms := nodeSelectorTermsForPolicyList(pl.Items)
if len(terms) > 0 {
ds.Spec.Template.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: terms,
},
},
}
}
return nil
}
func nodeSelectorTermsForPolicyList(policies []sriovnetworkv1.SriovNetworkNodePolicy) []corev1.NodeSelectorTerm {
terms := []corev1.NodeSelectorTerm{}
for _, p := range policies {
nodeSelector := corev1.NodeSelectorTerm{}
if len(p.Spec.NodeSelector) == 0 {
continue
}
expressions := []corev1.NodeSelectorRequirement{}
for k, v := range p.Spec.NodeSelector {
exp := corev1.NodeSelectorRequirement{
Operator: corev1.NodeSelectorOpIn,
Key: k,
Values: []string{v},
}
expressions = append(expressions, exp)
}
// sorting is needed to keep the daemon spec stable.
// the items are popped in a random order from the map
sort.Slice(expressions, func(i, j int) bool {
return expressions[i].Key < expressions[j].Key
})
nodeSelector = corev1.NodeSelectorTerm{
MatchExpressions: expressions,
}
terms = append(terms, nodeSelector)
}
return terms
}
// renderDsForCR renders the device plugin DaemonSet and related objects from the manifests at the given path
func renderDsForCR(path string, data *render.RenderData) ([]*uns.Unstructured, error) {
logger := ctlrlogger.WithName("renderDsForCR")
logger.Info("Start to render objects")
var err error
objs := []*uns.Unstructured{}
objs, err = render.RenderDir(path, data)
if err != nil {
return nil, errs.Wrap(err, "failed to render OpenShiftSRIOV Network manifests")
}
return objs, nil
}
func (r *SriovNetworkNodePolicyReconciler) renderDevicePluginConfigData(pl *sriovnetworkv1.SriovNetworkNodePolicyList, node *corev1.Node) (dptypes.ResourceConfList, error) {
logger := ctlrlogger.WithName("renderDevicePluginConfigData")
logger.Info("Start to render device plugin config data")
rcl := dptypes.ResourceConfList{}
for _, p := range pl.Items {
if p.Name == "default" {
continue
}
// render node specific data for device plugin config
if !p.Selected(node) {
continue
}
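// If a ResourceConfig entry with this resource name already exists in the list, the
// policy's selectors are merged into it below; otherwise a new entry is built and appended.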
found, i := resourceNameInList(p.Spec.ResourceName, &rcl)
netDeviceSelectors := dptypes.NetDeviceSelectors{}
if found {
if err := json.Unmarshal(*rcl.ResourceList[i].Selectors, &netDeviceSelectors); err != nil {
return rcl, err
}
if p.Spec.NicSelector.Vendor != "" && !sriovnetworkv1.StringInArray(p.Spec.NicSelector.Vendor, netDeviceSelectors.Vendors) {
netDeviceSelectors.Vendors = append(netDeviceSelectors.Vendors, p.Spec.NicSelector.Vendor)
}
if p.Spec.NicSelector.DeviceID != "" {
var deviceID string
if p.Spec.NumVfs == 0 {
deviceID = p.Spec.NicSelector.DeviceID
} else {
deviceID = sriovnetworkv1.GetVfDeviceId(p.Spec.NicSelector.DeviceID)
}
if !sriovnetworkv1.StringInArray(deviceID, netDeviceSelectors.Devices) && deviceID != "" {
netDeviceSelectors.Devices = append(netDeviceSelectors.Devices, deviceID)
}
}
if len(p.Spec.NicSelector.PfNames) > 0 {
netDeviceSelectors.PfNames = sriovnetworkv1.UniqueAppend(netDeviceSelectors.PfNames, p.Spec.NicSelector.PfNames...)
}
if len(p.Spec.NicSelector.RootDevices) > 0 {
netDeviceSelectors.RootDevices = sriovnetworkv1.UniqueAppend(netDeviceSelectors.RootDevices, p.Spec.NicSelector.RootDevices...)
}
// Removed driver constraint for "netdevice" DeviceType
if p.Spec.DeviceType == "vfio-pci" {
netDeviceSelectors.Drivers = sriovnetworkv1.UniqueAppend(netDeviceSelectors.Drivers, p.Spec.DeviceType)
}
// Enable the selection of devices using NetFilter
if p.Spec.NicSelector.NetFilter != "" {
nodeState := &sriovnetworkv1.SriovNetworkNodeState{}
err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: node.Name}, nodeState)
if err == nil {
// Loop through interfaces status to find a match for NetworkID or NetworkTag
for _, intf := range nodeState.Status.Interfaces {
if sriovnetworkv1.NetFilterMatch(p.Spec.NicSelector.NetFilter, intf.NetFilter) {
// Found a match add the Interfaces PciAddress
netDeviceSelectors.PciAddresses = sriovnetworkv1.UniqueAppend(netDeviceSelectors.PciAddresses, intf.PciAddress)
}
}
}
}
netDeviceSelectorsMarshal, err := json.Marshal(netDeviceSelectors)
if err != nil {
return rcl, err
}
rawNetDeviceSelectors := json.RawMessage(netDeviceSelectorsMarshal)
rcl.ResourceList[i].Selectors = &rawNetDeviceSelectors
logger.Info("Update resource", "Resource", rcl.ResourceList[i])
} else {
rc := &dptypes.ResourceConfig{
ResourceName: p.Spec.ResourceName,
}
netDeviceSelectors.IsRdma = p.Spec.IsRdma
if p.Spec.NicSelector.Vendor != "" {
netDeviceSelectors.Vendors = append(netDeviceSelectors.Vendors, p.Spec.NicSelector.Vendor)
}
if p.Spec.NicSelector.DeviceID != "" {
var deviceID string
if p.Spec.NumVfs == 0 {
deviceID = p.Spec.NicSelector.DeviceID
} else {
deviceID = sriovnetworkv1.GetVfDeviceId(p.Spec.NicSelector.DeviceID)
}
if !sriovnetworkv1.StringInArray(deviceID, netDeviceSelectors.Devices) && deviceID != "" {
netDeviceSelectors.Devices = append(netDeviceSelectors.Devices, deviceID)
}
}
if len(p.Spec.NicSelector.PfNames) > 0 {
netDeviceSelectors.PfNames = append(netDeviceSelectors.PfNames, p.Spec.NicSelector.PfNames...)
}
if len(p.Spec.NicSelector.RootDevices) > 0 {
netDeviceSelectors.RootDevices = append(netDeviceSelectors.RootDevices, p.Spec.NicSelector.RootDevices...)
}
// Removed driver constraint for "netdevice" DeviceType
if p.Spec.DeviceType == "vfio-pci" {
netDeviceSelectors.Drivers = append(netDeviceSelectors.Drivers, p.Spec.DeviceType)
}
// Enable the selection of devices using NetFilter
if p.Spec.NicSelector.NetFilter != "" {
nodeState := &sriovnetworkv1.SriovNetworkNodeState{}
err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: node.Name}, nodeState)
if err == nil {
// Loop through interfaces status to find a match for NetworkID or NetworkTag
for _, intf := range nodeState.Status.Interfaces {
if sriovnetworkv1.NetFilterMatch(p.Spec.NicSelector.NetFilter, intf.NetFilter) {
// Found a match add the Interfaces PciAddress
netDeviceSelectors.PciAddresses = sriovnetworkv1.UniqueAppend(netDeviceSelectors.PciAddresses, intf.PciAddress)
}
}
}
}
netDeviceSelectorsMarshal, err := json.Marshal(netDeviceSelectors)
if err != nil {
return rcl, err
}
rawNetDeviceSelectors := json.RawMessage(netDeviceSelectorsMarshal)
rc.Selectors = &rawNetDeviceSelectors
rcl.ResourceList = append(rcl.ResourceList, *rc)
logger.Info("Add resource", "Resource", *rc, "Resource list", rcl.ResourceList)
}
}
return rcl, nil
}
func resourceNameInList(name string, rcl *dptypes.ResourceConfList) (bool, int) {
for i, rc := range rcl.ResourceList {
if rc.ResourceName == name {
return true, i
}
}
return false, 0
}
| [
"\"SRIOV_CNI_IMAGE\"",
"\"SRIOV_INFINIBAND_CNI_IMAGE\"",
"\"SRIOV_DEVICE_PLUGIN_IMAGE\"",
"\"RELEASEVERSION\"",
"\"RESOURCE_PREFIX\"",
"\"SRIOV_CNI_BIN_PATH\""
]
| []
| [
"SRIOV_CNI_BIN_PATH",
"SRIOV_DEVICE_PLUGIN_IMAGE",
"RELEASEVERSION",
"SRIOV_INFINIBAND_CNI_IMAGE",
"RESOURCE_PREFIX",
"SRIOV_CNI_IMAGE"
]
| [] | ["SRIOV_CNI_BIN_PATH", "SRIOV_DEVICE_PLUGIN_IMAGE", "RELEASEVERSION", "SRIOV_INFINIBAND_CNI_IMAGE", "RESOURCE_PREFIX", "SRIOV_CNI_IMAGE"] | go | 6 | 0 | |
main/1200-1299/1202D_test.go | package main
import (
"github.com/EndlessCheng/codeforces-go/main/testutil"
"testing"
)
func TestSol1202D(t *testing.T) {
// just copied from the website
rawText := `2
6
1
outputCopy
113337
1337`
testutil.AssertEqual(t, rawText, Sol1202D)
}
| []
| []
| []
| [] | [] | go | null | null | null |
run_fast_dqn.py | from argparse import ArgumentParser
from distutils.util import strtobool
import itertools
import os
os.environ['TF_DETERMINISTIC_OPS'] = '1'
from threading import Thread
from queue import Queue
import numpy as np
from run_dqn import DQNAgent, main, make_parser
from fast_dqn.worker import Worker
class FastDQNAgent(DQNAgent):
def __init__(self, make_env_fn, workers=8, concurrent=True, synchronize=True, **kwargs):
assert workers >= 1
if synchronize:
assert workers != 1
envs = tuple(make_env_fn(i) for i in range(workers))
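# Shared buffers for the worker/agent exchange (an assumption based on how they are
# used in run() below): each worker is expected to write its current observation into
# shared_states[i] and read its Q-values from shared_qvalues[i], which the agent fills
# with target-network predictions once per step.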
self.shared_states = np.empty([workers, *envs[0].observation_space.shape], dtype=np.float32)
self.shared_qvalues = np.empty([workers, envs[0].action_space.n], dtype=np.float32)
self._workers = tuple(Worker(i, env=envs[i], agent=self) for i in range(workers))
super().__init__(make_env_fn, **kwargs)
self._env = env = self._workers[0]._env
if synchronize:
# Target update frequency must be divisible by number of workers to
# ensure workers use the correct network parameters when synchronized
assert self._target_update_freq % workers == 0
assert self._target_update_freq % self._train_freq == 0
self._minibatches_per_epoch = self._target_update_freq // self._train_freq
self._concurrent_training = concurrent
self._synchronize = synchronize
self._train_queue = Queue()
Thread(target=self._train_loop, daemon=True).start()
def run(self, duration):
self._prepopulate_replay_memory()
self._sync_everything()
for t in itertools.count(start=1):
if self._evaluate > 0 and t % self._evaluate == 1:
self._sync_everything()
mean_perf, std_perf = self.benchmark(epsilon=0.05, episodes=30)
print("Benchmark (t={}): mean={}, std={}".format(t - 1, mean_perf, std_perf))
if t > duration:
self._sync_everything()
return
if t % self._target_update_freq == 1:
self._sync_everything()
self._dqn.update_target_net()
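# In concurrent mode, queue an epoch's worth of minibatches right after the target
# update so the background training thread trains while the workers keep acting;
# otherwise training happens synchronously every train_freq steps below.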
if self._concurrent_training:
for _ in range(self._minibatches_per_epoch):
self._train_queue.put_nowait(None)
if not self._concurrent_training:
if t % self._train_freq == 1:
self._sync_workers()
self._train_queue.put_nowait(None)
self._train_queue.join()
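# Round-robin over the workers; t starts at 1, so i == 1 marks the start of a new
# cycle, at which point all workers are joined (when synchronizing) and fresh
# target-network Q-values are computed for the whole batch of worker states.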
i = t % len(self._workers)
if i == 1 and self._synchronize:
self._sync_workers()
# Compute the Q-values in a single minibatch
# We use the target network here so we can train the main network in parallel
self.shared_qvalues[:] = self._dqn.predict_target(self.shared_states).numpy()
self._workers[i].update(t)
def _train_loop(self):
while True:
self._train_queue.get()
minibatch = self._replay_memory.sample(self._batch_size)
self._dqn.train(*minibatch)
self._train_queue.task_done()
def _sync_workers(self):
for w in self._workers:
w.join()
def _flush_workers(self):
self._env.flush_monitor()
for w in self._workers:
for transition in w.flush():
self._replay_memory.save(*transition)
def _sync_everything(self):
self._train_queue.join()
self._sync_workers()
self._flush_workers()
def _step(self, epsilon):
return self._workers[0]._step(epsilon)
if __name__ == '__main__':
parser = make_parser()
parser.add_argument('--concurrent', type=strtobool, default=True)
parser.add_argument('--workers', type=int, default=8)
parser.add_argument('--synchronize', type=strtobool, default=True)
kwargs = vars(parser.parse_args())
main(FastDQNAgent, kwargs)
| []
| []
| [
"TF_DETERMINISTIC_OPS"
]
| [] | ["TF_DETERMINISTIC_OPS"] | python | 1 | 0 | |
python/pyspark/pandas/indexes/multi.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from functools import partial
from typing import Any, Optional, Tuple, Union, cast
import warnings
import pandas as pd
from pandas.api.types import is_list_like
from pandas.api.types import is_hashable
from pyspark import sql as spark
from pyspark.sql import functions as F, Window
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.missing.indexes import MissingPandasLikeMultiIndex
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.utils import (
compare_disallow_null,
is_name_like_tuple,
name_like_string,
scol_for,
verify_temp_column_name,
)
from pyspark.pandas.internal import (
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.typedef import Scalar
class MultiIndex(Index):
"""
Koalas MultiIndex that corresponds to pandas MultiIndex logically. This might hold Spark Column
internally.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : A single-level Index.
Examples
--------
>>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index # doctest: +SKIP
MultiIndex([(1, 4),
(2, 5),
(3, 6)],
)
>>> ps.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]).index # doctest: +SKIP
MultiIndex([('a', 'd'),
('b', 'e'),
('c', 'f')],
)
"""
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
if LooseVersion(pd.__version__) < LooseVersion("0.24"):
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
pidx = pd.MultiIndex(
levels=levels,
labels=codes,
sortorder=sortorder,
names=names,
dtype=dtype,
copy=copy,
name=name,
verify_integrity=verify_integrity,
)
else:
pidx = pd.MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
dtype=dtype,
copy=copy,
name=name,
verify_integrity=verify_integrity,
)
return ps.from_pandas(pidx)
@property
def _internal(self):
internal = self._kdf._internal
scol = F.struct(internal.index_spark_columns)
return internal.copy(
column_labels=[None],
data_spark_columns=[scol],
data_dtypes=[None],
column_label_names=None,
)
@property
def _column_label(self):
return None
def __abs__(self):
raise TypeError("TypeError: cannot perform __abs__ with this index type: MultiIndex")
def _with_new_scol(self, scol: spark.Column, *, dtype=None):
raise NotImplementedError("Not supported for type MultiIndex")
def _align_and_column_op(self, f, *args) -> Index:
raise NotImplementedError("Not supported for type MultiIndex")
def any(self, *args, **kwargs) -> None:
raise TypeError("cannot perform any with this index type: MultiIndex")
def all(self, *args, **kwargs) -> None:
raise TypeError("cannot perform all with this index type: MultiIndex")
@staticmethod
def from_tuples(tuples, sortorder=None, names=None) -> "MultiIndex":
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> ps.MultiIndex.from_tuples(tuples, names=('number', 'color')) # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_tuples(tuples=tuples, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_arrays(arrays, sortorder=None, names=None) -> "MultiIndex":
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays: list / sequence of array-likes
Each array-like gives one level’s value for each data point. len(arrays)
is the number of levels.
sortorder: int or None
Level of sortedness (must be lexicographically sorted by that level).
names: list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index: MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> ps.MultiIndex.from_arrays(arrays, names=('number', 'color')) # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_arrays(arrays=arrays, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_product(iterables, sortorder=None, names=None) -> "MultiIndex":
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> ps.MultiIndex.from_product([numbers, colors],
... names=['number', 'color']) # doctest: +SKIP
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_product(iterables=iterables, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_frame(df, names=None) -> "MultiIndex":
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = ps.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df # doctest: +SKIP
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> ps.MultiIndex.from_frame(df) # doctest: +SKIP
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> ps.MultiIndex.from_frame(df, names=['state', 'observation']) # doctest: +SKIP
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, DataFrame):
raise TypeError("Input must be a DataFrame")
sdf = df.to_spark()
if names is None:
names = df._internal.column_labels
elif not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
else:
names = [name if is_name_like_tuple(name) else (name,) for name in names]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in sdf.columns],
index_names=names,
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def name(self) -> str:
raise PandasNotImplementedError(class_name="pd.MultiIndex", property_name="name")
@name.setter
def name(self, name: str) -> None:
raise PandasNotImplementedError(class_name="pd.MultiIndex", property_name="name")
def _verify_for_rename(self, name):
if is_list_like(name):
if self._internal.index_level != len(name):
raise ValueError(
"Length of new names must be {}, got {}".format(
self._internal.index_level, len(name)
)
)
if any(not is_hashable(n) for n in name):
raise TypeError("MultiIndex.name must be a hashable type")
return [n if is_name_like_tuple(n) else (n,) for n in name]
else:
raise TypeError("Must pass list-like as `names`.")
def swaplevel(self, i=-2, j=-1) -> "MultiIndex":
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
Examples
--------
>>> midx = ps.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
>>> midx # doctest: +SKIP
MultiIndex([('a', 1),
('b', 2)],
names=['word', 'number'])
>>> midx.swaplevel(0, 1) # doctest: +SKIP
MultiIndex([(1, 'a'),
(2, 'b')],
names=['number', 'word'])
>>> midx.swaplevel('number', 'word') # doctest: +SKIP
MultiIndex([(1, 'a'),
(2, 'b')],
names=['number', 'word'])
"""
for index in (i, j):
if not isinstance(index, int) and index not in self.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.names.index(i)
j = j if isinstance(j, int) else self.names.index(j)
for index in (i, j):
if index >= len(self.names) or index < -len(self.names):
raise IndexError(
"Too many levels: Index has only %s levels, "
"%s is not a valid level number" % (len(self.names), index)
)
index_map = list(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_dtypes,
)
)
index_map[i], index_map[j] = index_map[j], index_map[i]
index_spark_columns, index_names, index_dtypes = zip(*index_map)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_dtypes=list(index_dtypes),
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def levshape(self) -> Tuple[int, ...]:
"""
A tuple with the length of each level.
Examples
--------
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.levshape
(3, 3)
"""
result = self._internal.spark_frame.agg(
*(F.countDistinct(c) for c in self._internal.index_spark_columns)
).collect()[0]
return tuple(result)
@staticmethod
def _comparator_for_monotonic_increasing(data_type):
return compare_disallow_null
def _is_monotonic(self, order):
if order == "increasing":
return self._is_monotonic_increasing().all()
else:
return self._is_monotonic_decreasing().all()
def _is_monotonic_increasing(self):
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-1, -1)
cond = F.lit(True)
has_not_null = F.lit(True)
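# Build a lexicographic comparison against the previous row: levels are folded from
# the innermost to the outermost, and a tie at an outer level defers to the result
# already computed for the inner levels; the first row (no previous value) and rows
# containing nulls are handled by the final condition below.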
for scol in self._internal.index_spark_columns[::-1]:
data_type = self._internal.spark_type_for(scol)
prev = F.lag(scol, 1).over(window)
compare = MultiIndex._comparator_for_monotonic_increasing(data_type)
# Since pandas 1.1.4, null value is not allowed at any levels of MultiIndex.
# Therefore, we should check `has_not_null` over the all levels.
has_not_null = has_not_null & scol.isNotNull()
cond = F.when(scol.eqNullSafe(prev), cond).otherwise(
compare(scol, prev, spark.Column.__gt__)
)
cond = has_not_null & (prev.isNull() | cond)
cond_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__is_monotonic_increasing_cond__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns + [cond.alias(cond_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return first_series(DataFrame(internal))
@staticmethod
def _comparator_for_monotonic_decreasing(data_type):
return compare_disallow_null
def _is_monotonic_decreasing(self):
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-1, -1)
cond = F.lit(True)
has_not_null = F.lit(True)
for scol in self._internal.index_spark_columns[::-1]:
data_type = self._internal.spark_type_for(scol)
prev = F.lag(scol, 1).over(window)
compare = MultiIndex._comparator_for_monotonic_increasing(data_type)
# Since pandas 1.1.4, null value is not allowed at any levels of MultiIndex.
# Therefore, we should check `has_not_null` over the all levels.
has_not_null = has_not_null & scol.isNotNull()
cond = F.when(scol.eqNullSafe(prev), cond).otherwise(
compare(scol, prev, spark.Column.__lt__)
)
cond = has_not_null & (prev.isNull() | cond)
cond_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__is_monotonic_decreasing_cond__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns + [cond.alias(cond_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return first_series(DataFrame(internal))
def to_frame(self, index=True, name=None) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of strings, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> idx = ps.MultiIndex.from_tuples(tuples, names=('number', 'color'))
>>> idx # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
>>> idx.to_frame() # doctest: +NORMALIZE_WHITESPACE
number color
number color
1 red 1 red
blue 1 blue
2 red 2 red
blue 2 blue
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
number color
0 1 red
1 1 blue
2 2 red
3 2 blue
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(name=['n', 'c']) # doctest: +NORMALIZE_WHITESPACE
n c
number color
1 red 1 red
blue 1 blue
2 red 2 red
blue 2 blue
"""
if name is None:
name = [
name if name is not None else (i,)
for i, name in enumerate(self._internal.index_names)
]
elif is_list_like(name):
if len(name) != self._internal.index_level:
raise ValueError("'name' should have same length as number of levels on index.")
name = [n if is_name_like_tuple(n) else (n,) for n in name]
else:
raise TypeError("'name' must be a list / sequence of column names.")
return self._to_frame(index=index, names=name)
def to_pandas(self) -> pd.MultiIndex:
"""
Return a pandas MultiIndex.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=[list('abcd'), list('efgh')])
>>> df['dogs'].index.to_pandas() # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
"""
# TODO: We might need to handle internal state change.
# So far, we don't have any functions to change the internal state of MultiIndex except for
# series-like operations. In that case, it creates new Index object instead of MultiIndex.
return super().to_pandas()
def toPandas(self) -> pd.MultiIndex:
warnings.warn(
"MultiIndex.toPandas is deprecated as of MultiIndex.to_pandas. "
"Please use the API instead.",
FutureWarning,
)
return self.to_pandas()
toPandas.__doc__ = to_pandas.__doc__
def nunique(self, dropna=True) -> None: # type: ignore
raise NotImplementedError("nunique is not defined for MultiIndex")
# TODO: add 'name' parameter after pd.MultiIndex.name is implemented
def copy(self, deep=None) -> "MultiIndex": # type: ignore
"""
Make a copy of this object.
Parameters
----------
deep : None
            this parameter is not supported but just a dummy parameter to match pandas.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=[list('abcd'), list('efgh')])
>>> df['dogs'].index # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
Copy index
>>> df.index.copy() # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
"""
return super().copy(deep=deep) # type: ignore
def symmetric_difference(self, other, result_name=None, sort=None) -> "MultiIndex":
"""
Compute the symmetric difference of two MultiIndex objects.
Parameters
----------
other : Index or array-like
result_name : list
sort : True or None, default None
Whether to sort the resulting index.
* True : Attempt to sort the result.
* None : Do not sort the result.
Returns
-------
        symmetric_difference : MultiIndex
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> midx1 = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 0, 1, 2, 0, 1, 2]])
>>> midx2 = pd.MultiIndex([['koalas', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 0, 1, 2, 0, 1, 2]])
>>> s1 = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx1)
>>> s2 = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx2)
>>> s1.index.symmetric_difference(s2.index) # doctest: +SKIP
MultiIndex([('koalas', 'speed'),
( 'lama', 'speed')],
)
You can set names of result Index.
>>> s1.index.symmetric_difference(s2.index, result_name=['a', 'b']) # doctest: +SKIP
MultiIndex([('koalas', 'speed'),
( 'lama', 'speed')],
names=['a', 'b'])
You can set sort to `True`, if you want to sort the resulting index.
>>> s1.index.symmetric_difference(s2.index, sort=True) # doctest: +SKIP
MultiIndex([('koalas', 'speed'),
( 'lama', 'speed')],
)
You can also use the ``^`` operator:
>>> s1.index ^ s2.index # doctest: +SKIP
MultiIndex([('koalas', 'speed'),
( 'lama', 'speed')],
)
"""
if type(self) != type(other):
raise NotImplementedError(
"Doesn't support symmetric_difference between Index & MultiIndex for now"
)
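        # Symmetric difference computed as (self union other) minus (self intersect other) on the index rows.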
sdf_self = self._kdf._internal.spark_frame.select(self._internal.index_spark_columns)
sdf_other = other._kdf._internal.spark_frame.select(other._internal.index_spark_columns)
sdf_symdiff = sdf_self.union(sdf_other).subtract(sdf_self.intersect(sdf_other))
if sort:
sdf_symdiff = sdf_symdiff.sort(self._internal.index_spark_columns)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf_symdiff,
index_spark_columns=[
scol_for(sdf_symdiff, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
result = cast(MultiIndex, DataFrame(internal).index)
if result_name:
result.names = result_name
return result
# TODO: ADD error parameter
def drop(self, codes, level=None) -> "MultiIndex":
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
Examples
--------
>>> index = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> index # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> index.drop(['a']) # doctest: +SKIP
MultiIndex([('b', 'y'),
('c', 'z')],
)
>>> index.drop(['x', 'y'], level=1) # doctest: +SKIP
MultiIndex([('c', 'z')],
)
"""
internal = self._internal.resolved_copy
sdf = internal.spark_frame
index_scols = internal.index_spark_columns
if level is None:
scol = index_scols[0]
elif isinstance(level, int):
scol = index_scols[level]
else:
scol = None
for index_spark_column, index_name in zip(
internal.index_spark_columns, internal.index_names
):
if not isinstance(level, tuple):
level = (level,)
if level == index_name:
if scol is not None:
raise ValueError(
"The name {} occurs multiple times, use a level number".format(
name_like_string(level)
)
)
scol = index_spark_column
if scol is None:
raise KeyError("Level {} not found".format(name_like_string(level)))
sdf = sdf[~scol.isin(codes)]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in internal.index_spark_column_names],
index_names=internal.index_names,
index_dtypes=internal.index_dtypes,
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return cast(MultiIndex, DataFrame(internal).index)
def argmax(self) -> None:
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
def argmin(self) -> None:
raise TypeError("reduction operation 'argmin' not allowed for this dtype")
def asof(self, label) -> None:
raise NotImplementedError(
"only the default get_loc method is currently supported for MultiIndex"
)
@property
def is_all_dates(self) -> bool:
"""
is_all_dates always returns False for MultiIndex
Examples
--------
>>> from datetime import datetime
>>> idx = ps.MultiIndex.from_tuples(
... [(datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 1, 0, 0, 0)),
... (datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 1, 0, 0, 0))])
>>> idx # doctest: +SKIP
MultiIndex([('2019-01-01', '2019-01-01'),
('2019-01-01', '2019-01-01')],
)
>>> idx.is_all_dates
False
"""
return False
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeMultiIndex, item):
property_or_func = getattr(MissingPandasLikeMultiIndex, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'MultiIndex' object has no attribute '{}'".format(item))
def _get_level_number(self, level) -> Optional[int]:
"""
Return the level number if a valid level is given.
"""
count = self.names.count(level)
if (count > 1) and not isinstance(level, int):
raise ValueError("The name %s occurs multiple times, use a level number" % level)
if level in self.names:
level = self.names.index(level)
elif isinstance(level, int):
nlevels = self.nlevels
if level >= nlevels:
raise IndexError(
"Too many levels: Index has only %d "
"levels, %d is not a valid level number" % (nlevels, level)
)
if level < 0:
if (level + nlevels) < 0:
raise IndexError(
"Too many levels: Index has only %d levels, "
"not %d" % (nlevels, level + 1)
)
level = level + nlevels
else:
raise KeyError("Level %s not found" % str(level))
return None
return level
def get_level_values(self, level) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = ps.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'a')])
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['x', 'x', 'y'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['a', 'b', 'a'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
index_scol = self._internal.index_spark_columns[level]
index_name = self._internal.index_names[level]
index_dtype = self._internal.index_dtypes[level]
internal = self._internal.copy(
index_spark_columns=[index_scol],
index_names=[index_name],
index_dtypes=[index_dtype],
column_labels=[],
data_spark_columns=[],
data_dtypes=[],
)
return DataFrame(internal).index
def insert(self, loc: int, item) -> Index:
"""
Make new MultiIndex inserting new item at location.
Follows Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : MultiIndex
Examples
--------
>>> kmidx = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
>>> kmidx.insert(3, ("h", "j")) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z'),
('h', 'j')],
)
For negative values
>>> kmidx.insert(-2, ("h", "j")) # doctest: +SKIP
MultiIndex([('a', 'x'),
('h', 'j'),
('b', 'y'),
('c', 'z')],
)
"""
length = len(self)
if loc < 0:
loc = loc + length
if loc < 0:
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(
(loc - length), length
)
)
else:
if loc > length:
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(loc, length)
)
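        # Build the new index as the union of the rows before `loc`, the inserted row, and the rows from `loc` onwards.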
index_name = self._internal.index_spark_column_names
sdf_before = self.to_frame(name=index_name)[:loc].to_spark()
sdf_middle = Index([item]).to_frame(name=index_name).to_spark()
sdf_after = self.to_frame(name=index_name)[loc:].to_spark()
sdf = sdf_before.union(sdf_middle).union(sdf_after)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
return DataFrame(internal).index
def item(self) -> Tuple[Scalar, ...]:
"""
Return the first element of the underlying data as a python tuple.
Returns
-------
tuple
The first element of MultiIndex.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> kmidx = ps.MultiIndex.from_tuples([('a', 'x')])
>>> kmidx.item()
('a', 'x')
"""
return self._kdf.head(2)._to_internal_pandas().index.item()
def intersection(self, other) -> "MultiIndex":
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : MultiIndex
Examples
--------
>>> midx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
>>> midx2 = ps.MultiIndex.from_tuples([("c", "z"), ("d", "w")])
>>> midx1.intersection(midx2).sort_values() # doctest: +SKIP
MultiIndex([('c', 'z')],
)
"""
if isinstance(other, Series) or not is_list_like(other):
raise TypeError("other must be a MultiIndex or a list of tuples")
elif isinstance(other, DataFrame):
raise ValueError("Index data must be 1-dimensional")
elif isinstance(other, MultiIndex):
spark_frame_other = other.to_frame().to_spark()
keep_name = self.names == other.names
elif isinstance(other, Index):
# Always returns an empty MultiIndex if `other` is Index.
return self.to_frame().head(0).index # type: ignore
elif not all(isinstance(item, tuple) for item in other):
raise TypeError("other must be a MultiIndex or a list of tuples")
else:
other = MultiIndex.from_tuples(list(other))
spark_frame_other = other.to_frame().to_spark()
keep_name = True
default_name = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self.nlevels)]
spark_frame_self = self.to_frame(name=default_name).to_spark()
spark_frame_intersected = spark_frame_self.intersect(spark_frame_other)
if keep_name:
index_names = self._internal.index_names
else:
index_names = None
internal = InternalFrame( # TODO: dtypes?
spark_frame=spark_frame_intersected,
index_spark_columns=[scol_for(spark_frame_intersected, col) for col in default_name],
index_names=index_names,
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def hasnans(self):
raise NotImplementedError("hasnans is not defined for MultiIndex")
@property
def inferred_type(self) -> str:
"""
Return a string of the type inferred from the values.
"""
# Always returns "mixed" for MultiIndex
return "mixed"
@property
def asi8(self) -> None:
"""
Integer representation of the values.
"""
# Always returns None for MultiIndex
return None
def factorize(
self, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple[Union["Series", "Index"], pd.Index]:
return MissingPandasLikeMultiIndex.factorize(self, sort=sort, na_sentinel=na_sentinel)
def __iter__(self):
return MissingPandasLikeMultiIndex.__iter__(self)
def _test():
import os
import doctest
import sys
import numpy
from pyspark.sql import SparkSession
import pyspark.pandas.indexes.multi
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.indexes.multi.__dict__.copy()
globs["np"] = numpy
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.indexes.multi tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.indexes.multi,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| [] | [] | ["SPARK_HOME"] | [] | ["SPARK_HOME"] | python | 1 | 0 | |
scripts/camera_module/camera_module.py | from argparse import ArgumentParser
from picamera import PiCamera
from time import sleep
from pathlib import Path
import os
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--resolution',
dest='res',
default='1920-1080',
help='Supported resolutions: 1920-1080, 3280-2464, 1640-1232, 1640-922, 1280-720, 640-480')
parser.add_argument('--output',
dest='out_folder',
default='/camera_output/continuous_captures/',
help='Location to store captured photos.')
parser.add_argument('--interval',
dest='interval',
default=10,
help='Time interval between capture. Default value is 10 seconds.')
parser.add_argument('--iso',
dest='iso',
default=100,
help='Camera ISO value. Default value is 100')
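    # Illustrative invocation (flag values are examples only, not the defaults):
    #   python camera_module.py --resolution 1280-720 --interval 30 --iso 200 \
    #       --output /camera_output/continuous_captures/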
# parse command line arguments
args = parser.parse_args()
# parse resolution
res = args.res.split('-')
res_width = int(res[0])
res_height = int(res[1])
# parse output location
output_folder = args.out_folder
# parse time interval
interval = int(args.interval)
# parse Camera ISO
iso = int(args.iso)
# initialize camera
camera = PiCamera()
camera_wakeup = camera_cooldown = 2
# set camera resolution
camera.resolution = (res_width, res_height)
# set camera ISO
camera.iso = iso
# wait for automatic gain control to settle
sleep(camera_wakeup)
# set output folder
Path(output_folder).mkdir(parents=True, exist_ok=True)
os.chdir(output_folder)
camera.start_preview()
sleep(camera_wakeup)
while True:
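        # DEVICE_NAME must be set in the environment (os.environ[...] raises KeyError
        # otherwise); it is used as the prefix of every capture filename.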
for filename in camera.capture_continuous(os.environ['DEVICE_NAME'] + '_img{timestamp:%Y-%m-%d-%H-%M-%S}.jpg'):
camera.start_preview()
sleep(camera_wakeup)
print('image captured... %s' % filename)
sleep(camera_cooldown)
camera.stop_preview()
sleep(interval - camera_wakeup - camera_cooldown)
| [] | [] | ["DEVICE_NAME"] | [] | ["DEVICE_NAME"] | python | 1 | 0 | |
envi_test.go | package envi
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_FromMap(t *testing.T) {
payload := make(map[string]string)
payload["EDITOR"] = "vim"
payload["PAGER"] = "less"
e := NewEnvi()
e.FromMap(payload)
assert.Len(t, e.ToMap(), 2)
}
func Test_LoadEnv(t *testing.T) {
e := NewEnvi()
e.LoadEnv("EDITOR", "PAGER", "HOME")
assert.Len(t, e.ToMap(), 3)
}
func Test_LoadJSONFromFile(t *testing.T) {
t.Run("no file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadJSONFiles()
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 0)
})
t.Run("a valid json file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadJSONFiles("testdata/valid1.json")
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 3)
})
t.Run("2 valid json files", func(t *testing.T) {
e := NewEnvi()
err := e.LoadJSONFiles("testdata/valid1.json", "testdata/valid2.json")
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 4)
})
t.Run("an invalid json file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadJSONFiles("testdata/invalid.json")
assert.Error(t, err)
})
t.Run("a missing file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadJSONFiles("testdata/idontexist.json")
assert.Error(t, err)
})
}
func Test_LoadYAMLFomFile(t *testing.T) {
t.Run("no file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadYAMLFiles()
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 0)
})
t.Run("a valid yaml file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadYAMLFiles("testdata/valid1.yaml")
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 3)
})
t.Run("2 valid yaml files", func(t *testing.T) {
e := NewEnvi()
err := e.LoadYAMLFiles("testdata/valid1.yaml", "testdata/valid2.yaml")
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 4)
})
t.Run("an invalid yaml file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadYAMLFiles("testdata/invalid.yaml")
assert.Error(t, err)
})
t.Run("a missing file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadYAMLFiles("testdata/idontexist.yaml")
assert.Error(t, err)
})
}
func Test_EnsureVars(t *testing.T) {
t.Run("all ensured vars are present", func(t *testing.T) {
payload := make(map[string]string)
payload["EDITOR"] = "vim"
payload["PAGER"] = "less"
e := NewEnvi()
e.FromMap(payload)
err := e.EnsureVars("EDITOR", "PAGER")
assert.NoError(t, err)
})
t.Run("one ensured var is missing", func(t *testing.T) {
payload := make(map[string]string)
payload["EDITOR"] = "vim"
payload["PAGER"] = "less"
e := NewEnvi()
e.FromMap(payload)
err := e.EnsureVars("EDITOR", "PAGER", "HOME")
assert.Error(t, err)
})
t.Run("all ensured vars are missing", func(t *testing.T) {
payload := make(map[string]string)
payload["EDITOR"] = "vim"
payload["PAGER"] = "less"
e := NewEnvi()
e.FromMap(payload)
err := e.EnsureVars("HOME", "MAIL", "URL")
assert.Error(t, err)
})
}
func Test_ToEnv(t *testing.T) {
payload := make(map[string]string)
payload["SCHURZLPURZ"] = "yes, indeed"
e := NewEnvi()
e.FromMap(payload)
e.ToEnv()
assert.Equal(t, "yes, indeed", os.Getenv("SCHURZLPURZ"))
}
func Test_ToMap(t *testing.T) {
payload := make(map[string]string)
payload["EDITOR"] = "vim"
payload["PAGER"] = "less"
e := NewEnvi()
e.FromMap(payload)
vars := e.ToMap()
assert.Len(t, vars, 2)
}
func Test_LoadFile(t *testing.T) {
t.Run("no file", func(t *testing.T) {
e := NewEnvi()
err := e.LoadFile("FILE", "")
assert.Error(t, err)
assert.Len(t, e.ToMap(), 0)
})
t.Run("file with string content", func(t *testing.T) {
e := NewEnvi()
err := e.LoadFile("FILE", filepath.Join("testdata/valid.txt"))
assert.NoError(t, err)
assert.Len(t, e.ToMap(), 1)
assert.Equal(t, "valid string", e.ToMap()["FILE"])
})
} | [
"\"SCHURZLPURZ\""
]
| []
| [
"SCHURZLPURZ"
]
| [] | ["SCHURZLPURZ"] | go | 1 | 0 | |
examples/Transports/FIX/over_one_session.py | """Tests FIX communication between a server and a client."""
import os
import sys
try:
sys.path.append(os.environ['PYFIXMSG_PATH'])
import pyfixmsg
except (KeyError, ImportError):
raise RuntimeError(
'Download pyfixmsg library from '
'https://github.com/Morgan-Stanley/pyfixmsg '
'and set PYFIXMSG_PATH env var to the local path.')
try:
SPEC_FILE = os.environ['FIX_SPEC_FILE']
except KeyError:
raise RuntimeError(
'No spec file set. You should download '
'https://github.com/quickfix/quickfix/blob/master/spec/FIX42.xml '
'file and set FIX_SPEC_FILE to the local path.')
from pyfixmsg.fixmessage import FixMessage
from pyfixmsg.codecs.stringfix import Codec
from pyfixmsg.reference import FixSpec
from testplan.common.utils.context import context
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.driver.fix import FixServer, FixClient
CODEC = Codec(spec=FixSpec(SPEC_FILE))
def fixmsg(source):
"""
Factory function that forces the codec to our given spec and avoid
passing codec to serialisation and parsing methods.
The codec defaults to a reasonable parser but without repeating groups.
An alternative method is to use the ``to_wire`` and ``from_wire`` methods
to serialise and parse messages and pass the codec explicitly.
"""
# python 2 and 3 compatibility
source = {tag: val.encode('utf-8') for tag, val in source.items()}
msg = FixMessage(source)
msg.codec = CODEC
return msg
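# For example, fixmsg({35: 'D'}) -- as used in the test case below -- builds a message
# whose tag values are utf-8 encoded and which serialises/parses with the shared FIX 4.2 codec.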
@testsuite
class FIXTestsuite(object):
@testcase
def send_and_receive_msg(self, env, result):
"""
Basic FIX messaging between a FixServer and a FixClient.
"""
# First we create a FIX message containing a single tag: 35=D
msg = fixmsg({35: 'D'})
# We use the client to send that message over to the server.
# The message is enriched with the expected session tags (49, 56 etc).
env.client.send(msg)
# We create a FIX message to describe what we expect the server to
# receive. We expect the default FIX version FIX.4.2, the same value
# for tag 35 as given, D, and the correct senderCompID and targetCompID.
exp_msg = fixmsg({8: 'FIX.4.2', 35: 'D',
49: env.client.sender,
56: env.client.target})
# We receive the message from the server.
received = env.server.receive()
# We assert that we expect a message that matches the message we sent.
# We restrict the comparison to tags 8, 35, 49 and 56, since we want to
# ignore the other message-level tags such as 9 and 10 that are
# automatically added by the connectors.
result.fix.match(exp_msg, received,
description='Message sent by client match.',
include_tags=[8, 35, 49, 56])
# Now, we create a response message from the server, confirming receipt
# of order (message type 8)
msg = fixmsg({35: '8'})
# We use the server to send the response to the client.
env.server.send(msg)
# We create a FIX message to describe what we expect the client to
# receive. The default FIX version FIX.4.2 is expected, together with
# the right senderCompID and targetCompID.
exp_msg = fixmsg({8: 'FIX.4.2',
35: '8',
49: env.client.target,
56: env.client.sender})
# We receive the message from the client.
received = env.client.receive()
# We expect a message that matches the message we sent. We restrict the
# comparison to tags 8, 35, 49 and 56, since we want to ignore the
# other message-level tags such as 9 and 10 that are automatically
# added by the connectors.
result.fix.match(exp_msg, received,
description='Message sent by server match.',
include_tags=[8, 35, 49, 56])
def get_multitest():
"""
Creates and returns a new MultiTest instance to be added to the plan.
The environment is a server and a client connecting using the context
functionality that retrieves host/port of the server after is started.
"""
test = MultiTest(name='OverOneSession',
suites=[FIXTestsuite()],
environment=[
FixServer(name='server',
msgclass=FixMessage,
codec=CODEC),
FixClient(name='client',
host=context('server', '{{host}}'),
port=context('server', '{{port}}'),
sender='TW',
target='ISLD',
msgclass=FixMessage,
codec=CODEC)])
return test
| [] | [] | ["PYFIXMSG_PATH", "FIX_SPEC_FILE"] | [] | ["PYFIXMSG_PATH", "FIX_SPEC_FILE"] | python | 2 | 0 | |
frontends/mrxl/mrxl/main.py | import sys
import json
import argparse
from .parse import parse
from .gen_futil import emit
from .interp import interp, InterpError
def main():
parser = argparse.ArgumentParser('Interpret a MrXL program, or compile it to FuTIL.')
parser.add_argument('--i', '--interpret', action='store_true', help='Interpret the input MrXL program (leave this off to compile)')
parser.add_argument('--data', metavar='<datafile>', type=str, help="Input data, required to interpret")
    parser.add_argument('filename', metavar='<file>', type=str, help="MrXL program to compile or interpret")
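    # Illustrative invocations (assuming this entry point is exposed as a `mrxl`
    # console script; adjust the command to your setup):
    #   mrxl program.mrxl                        # compile: emit FuTIL on stdout
    #   mrxl --i --data input.json program.mrxl  # interpret with input data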
args = parser.parse_args()
with open(args.filename) as f:
txt = f.read()
    data = None
    if args.data:
        with open(args.data) as f:
            data = json.load(f)
ast = parse(txt)
    if args.i:
        if data is None:
            parser.error('--data is required when interpreting')
        try:
print(interp(ast, data))
except InterpError as exc:
print(str(exc), file=sys.stderr)
sys.exit(1)
else:
emit(ast)
sys.exit(0)
| [] | [] | [] | [] | [] | python | null | null | |
settings.py | import json
import os
import pathlib
from decouple import config
LIVE_DEMO_MODE = config('DEMO_MODE', cast=bool, default=False)
PORT = config('PORT', cast=int, default=5000)
APP_URL = 'https://bachelor-thesis.herokuapp.com/'
DEBUG_MODE = config('DEBUG', cast=bool, default=False)
NO_DELAYS = config('NO_DELAYS', cast=bool, default=False)
REDIS_URL = config('REDIS_URL')
DIALOGFLOW_ACCESS_TOKEN = config('DIALOGFLOW_ACCESS_TOKEN')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_ACCESS_TOKEN')
TELEGRAM_ACCESS_TOKEN = config('TELEGRAM_ACCESS_TOKEN')
TWILIO_ACCESS_TOKEN = config('TWILIO_ACCESS_TOKEN')
TWILIO_ACCOUNT_SID = config('TWILIO_ACCOUNT_SID')
DATABASE_URL = config('DATABASE_URL')
ENABLE_CONVERSATION_RECORDING = config('RECORD_CONVERSATIONS', cast=bool, default=True)
CONTEXT_LOOKUP_RECENCY = 15
SUPPORT_CHANNEL_ID = -1001265422831
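# The service-account private key is stored in the env var with literal "\n" sequences;
# convert them back to real newlines before writing the JSON credentials file below.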
GOOGLE_SERVICE_ACCOUNT_KEY = config('GOOGLE_SERVICE_ACCOUNT_KEY').replace("\\n", "\n")
# Insert google private key into a template of the json configuration and add it to environment vars
_root_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
if not os.path.exists('tmp'):
os.makedirs('tmp')
google_service_account_file = _root_dir / 'tmp' / 'service-account-file.json'
template = json.load(open(_root_dir / "google-service-template.json", 'r'))
template["private_key"] = GOOGLE_SERVICE_ACCOUNT_KEY
json.dump(template, open(google_service_account_file, 'w+'))
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(google_service_account_file)
# Whether to remove the ForceReply markup in Telegram for any non-keyboard message (useful for demo)
ALWAYS_REMOVE_MARKUP = LIVE_DEMO_MODE
| [] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
_examples/view/template_jet_0/main.go | // Package main shows how to use jet template parser with ease using the Iris built-in Jet view engine.
// This example is customized fork of https://github.com/CloudyKit/jet/tree/master/examples/todos, so you can
// notice the differences side by side.
package main
import (
"bytes"
"encoding/base64"
"fmt"
"os"
"reflect"
"strings"
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/view"
)
type tTODO struct {
Text string
Done bool
}
type doneTODOs struct {
list map[string]*tTODO
keys []string
len int
i int
}
func (dt *doneTODOs) New(todos map[string]*tTODO) *doneTODOs {
dt.len = len(todos)
for k := range todos {
dt.keys = append(dt.keys, k)
}
dt.list = todos
return dt
}
// Range satisfies the jet.Ranger interface and only returns TODOs that are done,
// even when the list contains TODOs that are not done.
func (dt *doneTODOs) Range() (reflect.Value, reflect.Value, bool) {
for dt.i < dt.len {
key := dt.keys[dt.i]
dt.i++
if dt.list[key].Done {
return reflect.ValueOf(key), reflect.ValueOf(dt.list[key]), false
}
}
return reflect.Value{}, reflect.Value{}, true
}
// Note: jet version 4 requires this.
func (dt *doneTODOs) ProvidesIndex() bool { return true }
func (dt *doneTODOs) Render(r *view.JetRuntime) {
r.Write([]byte("custom renderer"))
}
// Render implements jet.Renderer interface
func (t *tTODO) Render(r *view.JetRuntime) {
done := "yes"
if !t.Done {
done = "no"
}
r.Write([]byte(fmt.Sprintf("TODO: %s (done: %s)", t.Text, done)))
}
func main() {
//
// Type aliases:
// view.JetRuntimeVars = jet.VarMap
// view.JetRuntime = jet.Runtime
// view.JetArguments = jet.Arguments
//
// Iris also gives you the ability to put runtime variables
// from middlewares as well, by:
// view.AddJetRuntimeVars(ctx, vars)
// or tmpl.AddRuntimeVars(ctx, vars)
app := iris.New()
tmpl := iris.Jet("./views", ".jet") // <--
tmpl.Reload(true) // remove in production.
tmpl.AddFunc("base64", func(a view.JetArguments) reflect.Value {
a.RequireNumOfArguments("base64", 1, 1)
buffer := bytes.NewBuffer(nil)
fmt.Fprint(buffer, a.Get(0))
return reflect.ValueOf(base64.URLEncoding.EncodeToString(buffer.Bytes()))
})
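	// The function registered above can then be called from a .jet template,
	// e.g. {{ base64("some value") }} (illustrative; no template shown here uses it).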
app.RegisterView(tmpl) // <--
	todos := map[string]*tTODO{
		"example-todo-1": {Text: "Add a show todo page to the example project", Done: true},
		"example-todo-2": {Text: "Add an add todo page to the example project"},
		"example-todo-3": {Text: "Add an update todo page to the example project"},
		"example-todo-4": {Text: "Add a delete todo page to the example project", Done: true},
}
app.Get("/", func(ctx iris.Context) {
err := ctx.View("todos/index.jet", todos) // <--
// Note that the `ctx.View` already logs the error if logger level is allowing it and returns the error.
if err != nil {
ctx.StopWithText(iris.StatusInternalServerError, "Templates not rendered!")
}
})
app.Get("/todo", func(ctx iris.Context) {
id := ctx.URLParam("id")
todo, ok := todos[id]
if !ok {
ctx.Redirect("/")
return
}
ctx.ViewData("title", "Show TODO")
ctx.View("todos/show.jet", todo)
})
app.Get("/all-done", func(ctx iris.Context) {
// vars := make(view.JetRuntimeVars)
// vars.Set("showingAllDone", true)
// vars.Set("title", "Todos - All Done")
// view.AddJetRuntimeVars(ctx, vars)
// ctx.View("todos/index.jet", (&doneTODOs{}).New(todos))
//
// OR
ctx.ViewData("showingAllDone", true)
ctx.ViewData("title", "Todos - All Done")
// Use ctx.ViewData("_jet", jetData)
// if using as middleware and you want
// to pre-set the value or even change it later on from another next middleware.
// ctx.ViewData("_jet", (&doneTODOs{}).New(todos))
// and ctx.View("todos/index.jet")
// OR
ctx.View("todos/index.jet", (&doneTODOs{}).New(todos))
})
port := os.Getenv("PORT")
if len(port) == 0 {
port = ":8080"
	} else if !strings.HasPrefix(port, ":") {
port = ":" + port
}
app.Listen(port)
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
src/cmd/go/go_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main_test
import (
"bytes"
"debug/elf"
"debug/macho"
"debug/pe"
"encoding/binary"
"flag"
"fmt"
"go/format"
"internal/race"
"internal/testenv"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"testing"
"time"
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/robustio"
"cmd/internal/sys"
)
var (
canRace = false // whether we can run the race detector
canCgo = false // whether we can use cgo
canMSan = false // whether we can run the memory sanitizer
)
var exeSuffix string = func() string {
if runtime.GOOS == "windows" {
return ".exe"
}
return ""
}()
func tooSlow(t *testing.T) {
if testing.Short() {
// In -short mode; skip test, except run it on the {darwin,linux,windows}/amd64 builders.
if testenv.Builder() != "" && runtime.GOARCH == "amd64" && (runtime.GOOS == "linux" || runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
return
}
t.Helper()
t.Skip("skipping test in -short mode")
}
}
// testGOROOT is the GOROOT to use when running testgo, a cmd/go binary
// build from this process's current GOROOT, but run from a different
// (temp) directory.
var testGOROOT string
var testCC string
var testGOCACHE string
var testGo string
var testTmpDir string
var testBin string
// The TestMain function creates a go command for testing purposes and
// deletes it after the tests have been run.
func TestMain(m *testing.M) {
	// $GO_GCFLAGS is a compiler debug flag known to cmd/dist, make.bash, etc.
// It is not a standard go command flag; use os.Getenv, not cfg.Getenv.
if os.Getenv("GO_GCFLAGS") != "" {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n")
fmt.Printf("SKIP\n")
return
}
flag.Parse()
if *proxyAddr != "" {
StartProxy()
select {}
}
// Run with a temporary TMPDIR to check that the tests don't
// leave anything behind.
topTmpdir, err := ioutil.TempDir("", "cmd-go-test-")
if err != nil {
log.Fatal(err)
}
if !*testWork {
defer removeAll(topTmpdir)
}
os.Setenv(tempEnvName(), topTmpdir)
dir, err := ioutil.TempDir(topTmpdir, "tmpdir")
if err != nil {
log.Fatal(err)
}
testTmpDir = dir
if !*testWork {
defer removeAll(testTmpDir)
}
testGOCACHE = cache.DefaultDir()
if testenv.HasGoBuild() {
testBin = filepath.Join(testTmpDir, "testbin")
if err := os.Mkdir(testBin, 0777); err != nil {
log.Fatal(err)
}
testGo = filepath.Join(testBin, "go"+exeSuffix)
args := []string{"build", "-tags", "testgo", "-o", testGo}
if race.Enabled {
args = append(args, "-race")
}
gotool, err := testenv.GoTool()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
goEnv := func(name string) string {
out, err := exec.Command(gotool, "env", name).CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out)
os.Exit(2)
}
return strings.TrimSpace(string(out))
}
testGOROOT = goEnv("GOROOT")
os.Setenv("TESTGO_GOROOT", testGOROOT)
// Ensure that GOROOT is set explicitly.
// Otherwise, if the toolchain was built with GOROOT_FINAL set but has not
// yet been moved to its final location, programs that invoke runtime.GOROOT
// may accidentally use the wrong path.
os.Setenv("GOROOT", testGOROOT)
// The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH
// toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH).
// The testgo.exe we are about to create will be built for GOOS/GOARCH,
// which means it will use the GOOS/GOARCH toolchain
// (installed in GOROOT/pkg/tool/GOOS_GOARCH).
// If these are not the same toolchain, then the entire standard library
// will look out of date (the compilers in those two different tool directories
// are built for different architectures and have different build IDs),
// which will cause many tests to do unnecessary rebuilds and some
// tests to attempt to overwrite the installed standard library.
// Bail out entirely in this case.
hostGOOS := goEnv("GOHOSTOS")
hostGOARCH := goEnv("GOHOSTARCH")
if hostGOOS != runtime.GOOS || hostGOARCH != runtime.GOARCH {
fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go
fmt.Printf("cmd/go test is not compatible with GOOS/GOARCH != GOHOSTOS/GOHOSTARCH (%s/%s != %s/%s)\n", runtime.GOOS, runtime.GOARCH, hostGOOS, hostGOARCH)
fmt.Printf("SKIP\n")
return
}
buildCmd := exec.Command(gotool, args...)
buildCmd.Env = append(os.Environ(), "GOFLAGS=-mod=vendor")
out, err := buildCmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "building testgo failed: %v\n%s", err, out)
os.Exit(2)
}
out, err = exec.Command(gotool, "env", "CC").CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "could not find testing CC: %v\n%s", err, out)
os.Exit(2)
}
testCC = strings.TrimSpace(string(out))
cmd := exec.Command(testGo, "env", "CGO_ENABLED")
cmd.Stderr = new(strings.Builder)
if out, err := cmd.Output(); err != nil {
fmt.Fprintf(os.Stderr, "running testgo failed: %v\n%s", err, cmd.Stderr)
os.Exit(2)
} else {
canCgo, err = strconv.ParseBool(strings.TrimSpace(string(out)))
if err != nil {
fmt.Fprintf(os.Stderr, "can't parse go env CGO_ENABLED output: %v\n", strings.TrimSpace(string(out)))
}
}
out, err = exec.Command(gotool, "env", "GOCACHE").CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "could not find testing GOCACHE: %v\n%s", err, out)
os.Exit(2)
}
testGOCACHE = strings.TrimSpace(string(out))
canMSan = canCgo && sys.MSanSupported(runtime.GOOS, runtime.GOARCH)
canRace = canCgo && sys.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH)
// The race detector doesn't work on Alpine Linux:
// golang.org/issue/14481
// gccgo does not support the race detector.
if isAlpineLinux() || runtime.Compiler == "gccgo" {
canRace = false
}
}
// Don't let these environment variables confuse the test.
os.Setenv("GOENV", "off")
os.Unsetenv("GOBIN")
os.Unsetenv("GOPATH")
os.Unsetenv("GIT_ALLOW_PROTOCOL")
os.Setenv("HOME", "/test-go-home-does-not-exist")
// On some systems the default C compiler is ccache.
// Setting HOME to a non-existent directory will break
// those systems. Disable ccache and use real compiler. Issue 17668.
os.Setenv("CCACHE_DISABLE", "1")
if cfg.Getenv("GOCACHE") == "" {
os.Setenv("GOCACHE", testGOCACHE) // because $HOME is gone
}
r := m.Run()
if !*testWork {
removeAll(testTmpDir) // os.Exit won't run defer
}
if !*testWork {
// There shouldn't be anything left in topTmpdir.
dirf, err := os.Open(topTmpdir)
if err != nil {
log.Fatal(err)
}
names, err := dirf.Readdirnames(0)
if err != nil {
log.Fatal(err)
}
if len(names) > 0 {
log.Fatalf("unexpected files left in tmpdir: %v", names)
}
removeAll(topTmpdir)
}
os.Exit(r)
}
func isAlpineLinux() bool {
if runtime.GOOS != "linux" {
return false
}
fi, err := os.Lstat("/etc/alpine-release")
return err == nil && fi.Mode().IsRegular()
}
// The length of an mtime tick on this system. This is an estimate of
// how long we need to sleep to ensure that the mtime of two files is
// different.
// We used to try to be clever but that didn't always work (see golang.org/issue/12205).
var mtimeTick time.Duration = 1 * time.Second
// Manage a single run of the testgo binary.
type testgoData struct {
t *testing.T
temps []string
env []string
tempdir string
ran bool
inParallel bool
stdout, stderr bytes.Buffer
execDir string // dir for tg.run
}
// skipIfGccgo skips the test if using gccgo.
func skipIfGccgo(t *testing.T, msg string) {
if runtime.Compiler == "gccgo" {
t.Skipf("skipping test not supported on gccgo: %s", msg)
}
}
// testgo sets up for a test that runs testgo.
func testgo(t *testing.T) *testgoData {
t.Helper()
testenv.MustHaveGoBuild(t)
testenv.SkipIfShortAndSlow(t)
return &testgoData{t: t}
}
// must gives a fatal error if err is not nil.
func (tg *testgoData) must(err error) {
tg.t.Helper()
if err != nil {
tg.t.Fatal(err)
}
}
// check gives a test non-fatal error if err is not nil.
func (tg *testgoData) check(err error) {
tg.t.Helper()
if err != nil {
tg.t.Error(err)
}
}
// parallel runs the test in parallel by calling t.Parallel.
func (tg *testgoData) parallel() {
tg.t.Helper()
if tg.ran {
tg.t.Fatal("internal testsuite error: call to parallel after run")
}
for _, e := range tg.env {
if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
val := e[strings.Index(e, "=")+1:]
if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") {
tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e)
}
}
}
tg.inParallel = true
tg.t.Parallel()
}
// pwd returns the current directory.
func (tg *testgoData) pwd() string {
tg.t.Helper()
wd, err := os.Getwd()
if err != nil {
tg.t.Fatalf("could not get working directory: %v", err)
}
return wd
}
// sleep sleeps for one tick, where a tick is a conservative estimate
// of how long it takes for a file modification to get a different
// mtime.
func (tg *testgoData) sleep() {
time.Sleep(mtimeTick)
}
// setenv sets an environment variable to use when running the test go
// command.
func (tg *testgoData) setenv(name, val string) {
tg.t.Helper()
if tg.inParallel && (name == "GOROOT" || name == "GOPATH" || name == "GOBIN") && (strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata")) {
tg.t.Fatalf("internal testsuite error: call to setenv with testdata (%s=%s) after parallel", name, val)
}
tg.unsetenv(name)
tg.env = append(tg.env, name+"="+val)
}
// unsetenv removes an environment variable.
func (tg *testgoData) unsetenv(name string) {
if tg.env == nil {
tg.env = append([]string(nil), os.Environ()...)
tg.env = append(tg.env, "GO111MODULE=off")
}
for i, v := range tg.env {
if strings.HasPrefix(v, name+"=") {
tg.env = append(tg.env[:i], tg.env[i+1:]...)
break
}
}
}
func (tg *testgoData) goTool() string {
return testGo
}
// doRun runs the test go command, recording stdout and stderr and
// returning exit status.
func (tg *testgoData) doRun(args []string) error {
tg.t.Helper()
if tg.inParallel {
for _, arg := range args {
if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") {
tg.t.Fatal("internal testsuite error: parallel run using testdata")
}
}
}
hasGoroot := false
for _, v := range tg.env {
if strings.HasPrefix(v, "GOROOT=") {
hasGoroot = true
break
}
}
prog := tg.goTool()
if !hasGoroot {
tg.setenv("GOROOT", testGOROOT)
}
tg.t.Logf("running testgo %v", args)
cmd := exec.Command(prog, args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Dir = tg.execDir
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("standard error:")
tg.t.Log(tg.stderr.String())
}
tg.ran = true
return status
}
// run runs the test go command, and expects it to succeed.
func (tg *testgoData) run(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status != nil {
wd, _ := os.Getwd()
tg.t.Logf("go %v failed unexpectedly in %s: %v", args, wd, status)
tg.t.FailNow()
}
}
// runFail runs the test go command, and expects it to fail.
func (tg *testgoData) runFail(args ...string) {
tg.t.Helper()
if status := tg.doRun(args); status == nil {
tg.t.Fatal("testgo succeeded unexpectedly")
} else {
tg.t.Log("testgo failed as expected:", status)
}
}
// runGit runs a git command, and expects it to succeed.
func (tg *testgoData) runGit(dir string, args ...string) {
tg.t.Helper()
cmd := exec.Command("git", args...)
tg.stdout.Reset()
tg.stderr.Reset()
cmd.Stdout = &tg.stdout
cmd.Stderr = &tg.stderr
cmd.Dir = dir
cmd.Env = tg.env
status := cmd.Run()
if tg.stdout.Len() > 0 {
tg.t.Log("git standard output:")
tg.t.Log(tg.stdout.String())
}
if tg.stderr.Len() > 0 {
tg.t.Log("git standard error:")
tg.t.Log(tg.stderr.String())
}
if status != nil {
tg.t.Logf("git %v failed unexpectedly: %v", args, status)
tg.t.FailNow()
}
}
// getStdout returns standard output of the testgo run as a string.
func (tg *testgoData) getStdout() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stdout.String()
}
// getStderr returns standard error of the testgo run as a string.
func (tg *testgoData) getStderr() string {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: stdout called before run")
}
return tg.stderr.String()
}
// doGrepMatch looks for a regular expression in a buffer, and returns
// whether it is found. The regular expression is matched against
// each line separately, as with the grep command.
func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: grep called before run")
}
re := regexp.MustCompile(match)
for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) {
if re.Match(ln) {
return true
}
}
return false
}
// doGrep looks for a regular expression in a buffer and fails if it
// is not found. The name argument is the name of the output we are
// searching, "output" or "error". The msg argument is logged on
// failure.
func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdout looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is not found.
func (tg *testgoData) grepStdout(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stdout, "output", msg)
}
// grepStderr looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is not found.
func (tg *testgoData) grepStderr(match, msg string) {
tg.t.Helper()
tg.doGrep(match, &tg.stderr, "error", msg)
}
// grepBoth looks for a regular expression in the test run's standard
// output or standard error and fails, logging msg, if it is not found.
func (tg *testgoData) grepBoth(match, msg string) {
tg.t.Helper()
if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) {
tg.t.Log(msg)
tg.t.Logf("pattern %v not found in standard output or standard error", match)
tg.t.FailNow()
}
}
// doGrepNot looks for a regular expression in a buffer and fails if
// it is found. The name and msg arguments are as for doGrep.
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) {
tg.t.Helper()
if tg.doGrepMatch(match, b) {
tg.t.Log(msg)
tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name)
tg.t.FailNow()
}
}
// grepStdoutNot looks for a regular expression in the test run's
// standard output and fails, logging msg, if it is found.
func (tg *testgoData) grepStdoutNot(match, msg string) {
tg.t.Helper()
tg.doGrepNot(match, &tg.stdout, "output", msg)
}
// grepStderrNot looks for a regular expression in the test run's
// standard error and fails, logging msg, if it is found.
func (tg *testgoData) grepStderrNot(match, msg string) {
tg.t.Helper()
tg.doGrepNot(match, &tg.stderr, "error", msg)
}
// grepBothNot looks for a regular expression in the test run's
// standard output or standard error and fails, logging msg, if it is
// found.
func (tg *testgoData) grepBothNot(match, msg string) {
tg.t.Helper()
if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) {
tg.t.Log(msg)
tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match)
}
}
// doGrepCount counts the number of times a regexp is seen in a buffer.
func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int {
tg.t.Helper()
if !tg.ran {
tg.t.Fatal("internal testsuite error: doGrepCount called before run")
}
re := regexp.MustCompile(match)
c := 0
for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) {
if re.Match(ln) {
c++
}
}
return c
}
// grepCountBoth returns the number of times a regexp is seen in both
// standard output and standard error.
func (tg *testgoData) grepCountBoth(match string) int {
tg.t.Helper()
return tg.doGrepCount(match, &tg.stdout) + tg.doGrepCount(match, &tg.stderr)
}
// creatingTemp records that the test plans to create a temporary file
// or directory. If the file or directory exists already, it will be
// removed. When the test completes, the file or directory will be
// removed if it exists.
func (tg *testgoData) creatingTemp(path string) {
tg.t.Helper()
if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) {
tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path)
}
tg.must(robustio.RemoveAll(path))
tg.temps = append(tg.temps, path)
}
// makeTempdir makes a temporary directory for a run of testgo. If
// the temporary directory was already created, this does nothing.
func (tg *testgoData) makeTempdir() {
tg.t.Helper()
if tg.tempdir == "" {
var err error
tg.tempdir, err = ioutil.TempDir("", "gotest")
tg.must(err)
}
}
// tempFile adds a temporary file for a run of testgo.
func (tg *testgoData) tempFile(path, contents string) {
tg.t.Helper()
tg.makeTempdir()
tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755))
bytes := []byte(contents)
if strings.HasSuffix(path, ".go") {
formatted, err := format.Source(bytes)
if err == nil {
bytes = formatted
}
}
tg.must(ioutil.WriteFile(filepath.Join(tg.tempdir, path), bytes, 0644))
}
// tempDir adds a temporary directory for a run of testgo.
func (tg *testgoData) tempDir(path string) {
tg.t.Helper()
tg.makeTempdir()
if err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755); err != nil && !os.IsExist(err) {
tg.t.Fatal(err)
}
}
// path returns the absolute pathname to file with the temporary
// directory.
func (tg *testgoData) path(name string) string {
tg.t.Helper()
if tg.tempdir == "" {
tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name)
}
if name == "." {
return tg.tempdir
}
return filepath.Join(tg.tempdir, name)
}
// mustExist fails if path does not exist.
func (tg *testgoData) mustExist(path string) {
tg.t.Helper()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
tg.t.Fatalf("%s does not exist but should", path)
}
tg.t.Fatalf("%s stat failed: %v", path, err)
}
}
// mustNotExist fails if path exists.
func (tg *testgoData) mustNotExist(path string) {
tg.t.Helper()
if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
tg.t.Fatalf("%s exists but should not (%v)", path, err)
}
}
// mustHaveContent succeeds if filePath is a path to a file,
// and that file is readable and not empty.
func (tg *testgoData) mustHaveContent(filePath string) {
tg.mustExist(filePath)
f, err := os.Stat(filePath)
if err != nil {
tg.t.Fatal(err)
}
if f.Size() == 0 {
tg.t.Fatalf("expected %s to have data, but is empty", filePath)
}
}
// wantExecutable fails with msg if path is not executable.
func (tg *testgoData) wantExecutable(path, msg string) {
tg.t.Helper()
if st, err := os.Stat(path); err != nil {
if !os.IsNotExist(err) {
tg.t.Log(err)
}
tg.t.Fatal(msg)
} else {
if runtime.GOOS != "windows" && st.Mode()&0111 == 0 {
tg.t.Fatalf("binary %s exists but is not executable", path)
}
}
}
// isStale reports whether pkg is stale, and why
func (tg *testgoData) isStale(pkg string) (bool, string) {
tg.t.Helper()
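	// `go list -f {{.Stale}}:{{.StaleReason}}` prints a line such as "false:" or
	// "true:stale dependency: runtime"; it is split on the first colon below.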
tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg)
v := strings.TrimSpace(tg.getStdout())
f := strings.SplitN(v, ":", 2)
if len(f) == 2 {
switch f[0] {
case "true":
return true, f[1]
case "false":
return false, f[1]
}
}
tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v)
panic("unreachable")
}
// wantStale fails with msg if pkg is not stale.
func (tg *testgoData) wantStale(pkg, reason, msg string) {
tg.t.Helper()
stale, why := tg.isStale(pkg)
if !stale {
tg.t.Fatal(msg)
}
// We always accept the reason as being "not installed but
// available in build cache", because when that is the case go
// list doesn't try to sort out the underlying reason why the
// package is not installed.
if reason == "" && why != "" || !strings.Contains(why, reason) && !strings.Contains(why, "not installed but available in build cache") {
tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason)
}
}
// wantNotStale fails with msg if pkg is stale.
func (tg *testgoData) wantNotStale(pkg, reason, msg string) {
tg.t.Helper()
stale, why := tg.isStale(pkg)
if stale {
tg.t.Fatal(msg)
}
if reason == "" && why != "" || !strings.Contains(why, reason) {
tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason)
}
}
// If -testwork is specified, the test prints the name of the temp directory
// and does not remove it when done, so that a programmer can
// poke at the test file tree afterward.
var testWork = flag.Bool("testwork", false, "")
// cleanup cleans up a test that runs testgo.
func (tg *testgoData) cleanup() {
tg.t.Helper()
if *testWork {
tg.t.Logf("TESTWORK=%s\n", tg.path("."))
return
}
for _, path := range tg.temps {
tg.check(removeAll(path))
}
if tg.tempdir != "" {
tg.check(removeAll(tg.tempdir))
}
}
func removeAll(dir string) error {
// module cache has 0444 directories;
// make them writable in order to remove content.
filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
// chmod not only directories, but also things that we couldn't even stat
// due to permission errors: they may also be unreadable directories.
if err != nil || info.IsDir() {
os.Chmod(path, 0777)
}
return nil
})
return robustio.RemoveAll(dir)
}
// failSSH puts an ssh executable in the PATH that always fails.
// This is to stub out uses of ssh by go get.
func (tg *testgoData) failSSH() {
tg.t.Helper()
wd, err := os.Getwd()
if err != nil {
tg.t.Fatal(err)
}
fail := filepath.Join(wd, "testdata/failssh")
tg.setenv("PATH", fmt.Sprintf("%v%c%v", fail, filepath.ListSeparator, os.Getenv("PATH")))
}
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
if testing.Short() {
t.Skip("skipping lengthy test in short mode")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// Copy the runtime packages into a temporary GOROOT
// so that we can change files.
for _, copydir := range []string{
"src/runtime",
"src/internal/bytealg",
"src/internal/cpu",
"src/math/bits",
"src/unsafe",
filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH),
"pkg/include",
} {
srcdir := filepath.Join(testGOROOT, copydir)
tg.tempDir(filepath.Join("goroot", copydir))
err := filepath.Walk(srcdir,
func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
srcrel, err := filepath.Rel(srcdir, path)
if err != nil {
return err
}
dest := filepath.Join("goroot", copydir, srcrel)
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
tg.tempFile(dest, string(data))
if err := os.Chmod(tg.path(dest), info.Mode()|0200); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
tg.setenv("GOROOT", tg.path("goroot"))
addVar := func(name string, idx int) (restore func()) {
data, err := ioutil.ReadFile(name)
if err != nil {
t.Fatal(err)
}
old := data
data = append(data, fmt.Sprintf("var DummyUnusedVar%d bool\n", idx)...)
if err := ioutil.WriteFile(name, append(data, '\n'), 0666); err != nil {
t.Fatal(err)
}
tg.sleep()
return func() {
if err := ioutil.WriteFile(name, old, 0666); err != nil {
t.Fatal(err)
}
}
}
// Every main package depends on the "runtime".
tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`)
tg.setenv("GOPATH", tg.path("d1"))
// Pass -i flag to rebuild everything outdated.
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes")
// Changing mtime of runtime/internal/sys/sys.go
// should have no effect: only the content matters.
// In fact this should be true even outside a release branch.
sys := tg.path("goroot/src/runtime/internal/sys/sys.go")
tg.sleep()
restore := addVar(sys, 0)
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of runtime/internal/sys/sys.go")
// But changing content of any file should have an effect.
// Previously zversion.go was the only one that mattered;
// now they all matter, so keep using sys.go.
restore = addVar(sys, 1)
defer restore()
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go")
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addVar(sys, 2)
tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
// Restore to "old" release.
restore()
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
}
// cmd/go: custom import path checking should not apply to Go packages without import comment.
func TestIssue10952(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
const importPath = "github.com/zombiezen/go-get-issue-10952"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "remote", "set-url", "origin", "https://"+importPath+".git")
tg.run("get", "-d", "-u", importPath)
}
func TestIssue16471(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
}
// Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
const importPath = "rsc.io/go-get-issue-11457"
tg.run("get", "-d", "-u", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "remote", "set-url", "origin", "[email protected]:rsc/go-get-issue-11457")
// At this time, custom import path checking compares remotes verbatim (rather than
// just the host and path, skipping scheme and user), so we expect go get -u to fail.
// However, the goal of this test is to verify that gitRemoteRepo correctly parsed
// the SCP-like syntax, and we expect it to appear in the error message.
tg.runFail("get", "-d", "-u", importPath)
want := " is checked out from ssh://[email protected]/rsc/go-get-issue-11457"
if !strings.HasSuffix(strings.TrimSpace(tg.getStderr()), want) {
t.Error("expected clone URL to appear in stderr")
}
}
func TestGetGitDefaultBranch(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
// This repo has two branches, master and another-branch.
// The another-branch is the default that you get from 'git clone'.
// The go get command variants should not override this.
const importPath = "github.com/rsc/go-get-default-branch"
tg.run("get", "-d", importPath)
repoDir := tg.path("src/" + importPath)
tg.runGit(repoDir, "branch", "--contains", "HEAD")
tg.grepStdout(`\* another-branch`, "not on correct default branch")
tg.run("get", "-d", "-u", importPath)
tg.runGit(repoDir, "branch", "--contains", "HEAD")
tg.grepStdout(`\* another-branch`, "not on correct default branch")
}
// Security issue. Don't disable. See golang.org/issue/22125.
func TestAccidentalGitCheckout(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
testenv.MustHaveExecPath(t, "svn")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git")
tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
if _, err := os.Stat(tg.path("SrC")); err == nil {
// This case only triggers on a case-insensitive file system.
tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main")
tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason")
}
}
func TestPackageMainTestCompilerFlags(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p1.go", "package main\n")
tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n")
tg.run("test", "-c", "-n", "p1")
tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go")
tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go")
}
// Issue 4104.
func TestGoTestWithPackageListedMultipleTimes(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "errors", "errors", "errors", "errors", "errors")
if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") {
t.Error("go test errors errors errors errors errors tested the same package multiple times")
}
}
func TestGoListHasAConsistentOrder(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "std")
first := tg.getStdout()
tg.run("list", "std")
if first != tg.getStdout() {
t.Error("go list std ordering is inconsistent")
}
}
func TestGoListStdDoesNotIncludeCommands(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "std")
tg.grepStdoutNot("cmd/", "go list std shows commands")
}
func TestGoListCmdOnlyShowsCommands(t *testing.T) {
skipIfGccgo(t, "gccgo does not have GOROOT")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("list", "cmd")
out := strings.TrimSpace(tg.getStdout())
for _, line := range strings.Split(out, "\n") {
if !strings.Contains(line, "cmd/") {
t.Error("go list cmd shows non-commands")
break
}
}
}
func TestGoListDeps(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/p1/p2/p3/p4")
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n")
tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n")
tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n")
tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n")
tg.run("list", "-f", "{{.Deps}}", "p1")
tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4")
tg.run("list", "-deps", "p1")
tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
if runtime.Compiler != "gccgo" {
// Check the list is in dependency order.
tg.run("list", "-deps", "math")
want := "internal/cpu\nunsafe\nmath/bits\nmath\n"
out := tg.stdout.String()
if !strings.Contains(out, "internal/cpu") {
// Some systems don't use internal/cpu.
want = "unsafe\nmath/bits\nmath\n"
}
if tg.stdout.String() != want {
t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
}
}
}
func TestGoListTest(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-test", "-deps", "sort")
tg.grepStdout(`^sort.test$`, "missing test main")
tg.grepStdout(`^sort$`, "missing real sort")
tg.grepStdout(`^sort \[sort.test\]$`, "missing test copy of sort")
tg.grepStdout(`^testing \[sort.test\]$`, "missing test copy of testing")
tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
tg.run("list", "-test", "sort")
tg.grepStdout(`^sort.test$`, "missing test main")
tg.grepStdout(`^sort$`, "missing real sort")
tg.grepStdout(`^sort \[sort.test\]$`, "unexpected test copy of sort")
tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing")
tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
tg.run("list", "-test", "cmd/dist", "cmd/doc")
tg.grepStdout(`^cmd/dist$`, "missing cmd/dist")
tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
tg.grepStdoutNot(`^cmd/dist\.test$`, "unexpected cmd/dist test")
tg.grepStdoutNot(`^testing`, "unexpected testing")
tg.run("list", "-test", "runtime/cgo")
tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo")
tg.run("list", "-deps", "-f", "{{if .DepOnly}}{{.ImportPath}}{{end}}", "sort")
tg.grepStdout(`^internal/reflectlite$`, "missing internal/reflectlite")
tg.grepStdoutNot(`^sort`, "unexpected sort")
}
func TestGoListCompiledCgo(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-f", `{{join .CgoFiles "\n"}}`, "net")
if tg.stdout.String() == "" {
t.Skip("net does not use cgo")
}
if strings.Contains(tg.stdout.String(), tg.tempdir) {
t.Fatalf(".CgoFiles unexpectedly mentioned cache %s", tg.tempdir)
}
tg.run("list", "-compiled", "-f", `{{.Dir}}{{"\n"}}{{join .CompiledGoFiles "\n"}}`, "net")
if !strings.Contains(tg.stdout.String(), tg.tempdir) {
t.Fatalf(".CompiledGoFiles with -compiled did not mention cache %s", tg.tempdir)
}
dir := ""
for _, file := range strings.Split(tg.stdout.String(), "\n") {
if file == "" {
continue
}
if dir == "" {
dir = file
continue
}
if !strings.Contains(file, "/") && !strings.Contains(file, `\`) {
file = filepath.Join(dir, file)
}
if _, err := os.Stat(file); err != nil {
t.Fatalf("cannot find .CompiledGoFiles result %s: %v", file, err)
}
}
}
func TestGoListExport(t *testing.T) {
skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.tempdir)
tg.run("list", "-f", "{{.Export}}", "strings")
if tg.stdout.String() != "" {
t.Fatalf(".Export without -export unexpectedly set")
}
tg.run("list", "-export", "-f", "{{.Export}}", "strings")
file := strings.TrimSpace(tg.stdout.String())
if file == "" {
t.Fatalf(".Export with -export was empty")
}
if _, err := os.Stat(file); err != nil {
t.Fatalf("cannot find .Export result %s: %v", file, err)
}
tg.run("list", "-export", "-f", "{{.BuildID}}", "strings")
buildID := strings.TrimSpace(tg.stdout.String())
if buildID == "" {
t.Fatalf(".BuildID with -export was empty")
}
tg.run("tool", "buildid", file)
toolBuildID := strings.TrimSpace(tg.stdout.String())
if buildID != toolBuildID {
t.Fatalf(".BuildID with -export %q disagrees with 'go tool buildid' %q", buildID, toolBuildID)
}
}
// Issue 4096. Validate the output of unsuccessful go install foo/quxx.
func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 {
t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`)
}
}
func TestGOROOTSearchFailureReporting(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 {
t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`)
}
}
func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 {
t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`)
}
}
// Test (from $GOPATH) annotation is reported for the first GOPATH entry,
func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 {
t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`)
}
}
// but not on the second.
func TestMentionGOPATHNotOnSecondEntry(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
sep := string(filepath.ListSeparator)
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b"))
tg.runFail("install", "foo/quxx")
if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 {
t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`)
}
}
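// homeEnvName returns the name of the environment variable that holds the
// user's home directory on the current operating system.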
func homeEnvName() string {
switch runtime.GOOS {
case "windows":
return "USERPROFILE"
case "plan9":
return "home"
default:
return "HOME"
}
}
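// tempEnvName returns the name of the environment variable consulted for the
// temporary directory on the current operating system.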
func tempEnvName() string {
switch runtime.GOOS {
case "windows":
return "TMP"
case "plan9":
return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine
default:
return "TMPDIR"
}
}
func TestDefaultGOPATH(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("home/go")
tg.setenv(homeEnvName(), tg.path("home"))
tg.run("env", "GOPATH")
tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go")
tg.setenv("GOROOT", tg.path("home/go"))
tg.run("env", "GOPATH")
tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go")
tg.setenv("GOROOT", tg.path("home/go")+"/")
tg.run("env", "GOPATH")
tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
}
func TestDefaultGOPATHGet(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
// warn for creating directory
tg.run("get", "-v", "github.com/golang/example/hello")
tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
// no warning if directory already exists
tg.must(robustio.RemoveAll(tg.path("home/go")))
tg.tempDir("home/go")
tg.run("get", "github.com/golang/example/hello")
tg.grepStderrNot(".", "expected no output on standard error")
// error if $HOME/go is a file
tg.must(robustio.RemoveAll(tg.path("home/go")))
tg.tempFile("home/go", "")
tg.runFail("get", "github.com/golang/example/hello")
tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
}
func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", "")
tg.tempDir("home")
tg.setenv(homeEnvName(), tg.path("home"))
tg.runFail("install", "github.com/golang/example/hello")
tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH")
}
func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
skipIfGccgo(t, "gccgo does not support -ldflags -X")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main
var extern string
func main() {
println(extern)
}`)
tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go"))
tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`)
}
func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test")
}
func TestGoTestDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
func TestGoTestDashIDashOWritesBinary(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
// don't let test -i overwrite runtime
tg.wantNotStale("runtime", "", "must be non-stale before test -i")
tg.run("test", "-v", "-i", "-o", tg.path("myerrors.test"+exeSuffix), "errors")
tg.grepBothNot("PASS|FAIL", "test should not have run")
tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test")
}
// Issue 4515.
func TestInstallWithTags(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("bin")
tg.tempFile("src/example/a/main.go", `package main
func main() {}`)
tg.tempFile("src/example/b/main.go", `// +build mytag
package main
func main() {}`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "-tags", "mytag", "example/a", "example/b")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries")
tg.must(os.Remove(tg.path("bin/a" + exeSuffix)))
tg.must(os.Remove(tg.path("bin/b" + exeSuffix)))
tg.run("install", "-tags", "mytag", "example/...")
tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries")
tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries")
tg.run("list", "-tags", "mytag", "example/b...")
if strings.TrimSpace(tg.getStdout()) != "example/b" {
t.Error("go list example/b did not find example/b")
}
}
// Issue 17451, 17662.
func TestSymlinkWarning(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempDir("src/example/xx")
tg.tempDir("yy/zz")
tg.tempFile("yy/zz/zz.go", "package zz\n")
if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil {
t.Skipf("symlink failed: %v", err)
}
tg.run("list", "example/xx/z...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderrNot("symlink", "list should not have reported symlink")
tg.run("list", "example/xx/...")
tg.grepStdoutNot(".", "list should not have matched anything")
tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages")
tg.grepStderr("ignoring symlink", "list should have reported symlink")
}
func TestCgoShowsFullPathNames(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/y/dirname/foo.go", `
package foo
import "C"
func f() {`)
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "x/y/dirname")
tg.grepBoth("x/y/dirname", "error did not use full path")
}
func TestCgoHandlesWlORIGIN(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/origin/origin.go", `package origin
// #cgo !darwin LDFLAGS: -Wl,-rpath,$ORIGIN
// void f(void) {}
import "C"
func f() { C.f() }`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "origin")
}
func TestCgoPkgConfig(t *testing.T) {
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("env", "PKG_CONFIG")
pkgConfig := strings.TrimSpace(tg.getStdout())
testenv.MustHaveExecPath(t, pkgConfig)
if out, err := exec.Command(pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil {
t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out)
}
// OpenBSD's pkg-config is strict about whitespace and only
// supports backslash-escaped whitespace. It does not support
// quotes, which the normal freedesktop.org pkg-config does
// support. See https://man.openbsd.org/pkg-config.1
tg.tempFile("foo.pc", `
Name: foo
Description: The foo library
Version: 1.0.0
Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world
`)
tg.tempFile("foo.go", `package main
/*
#cgo pkg-config: foo
int value() {
return DEFINED_FROM_PKG_CONFIG;
}
*/
import "C"
import "os"
func main() {
if C.value() != 42 {
println("value() =", C.value(), "wanted 42")
os.Exit(1)
}
}
`)
tg.setenv("PKG_CONFIG_PATH", tg.path("."))
tg.run("run", tg.path("foo.go"))
}
func TestListTemplateContextFunction(t *testing.T) {
t.Parallel()
for _, tt := range []struct {
v string
want string
}{
{"GOARCH", runtime.GOARCH},
{"GOOS", runtime.GOOS},
{"GOROOT", filepath.Clean(runtime.GOROOT())},
{"GOPATH", os.Getenv("GOPATH")},
{"CgoEnabled", ""},
{"UseAllFiles", ""},
{"Compiler", ""},
{"BuildTags", ""},
{"ReleaseTags", ""},
{"InstallSuffix", ""},
} {
tt := tt
t.Run(tt.v, func(t *testing.T) {
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tmpl := "{{context." + tt.v + "}}"
tg.run("list", "-f", tmpl)
if tt.want == "" {
return
}
if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
}
})
}
}
// Test that you cannot use a local import in a package
// accessed by a non-local import (found in a GOPATH/GOROOT).
// See golang.org/issue/17475.
func TestImportLocal(t *testing.T) {
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.tempFile("src/dir/x/x.go", `package x
var X int
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "dir/x")
// Ordinary import should work.
tg.tempFile("src/dir/p0/p.go", `package p0
import "dir/x"
var _ = x.X
`)
tg.run("build", "dir/p0")
// Relative import should not.
tg.tempFile("src/dir/p1/p.go", `package p1
import "../x"
var _ = x.X
`)
tg.runFail("build", "dir/p1")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/p2/p.go", `package p2
`)
tg.tempFile("src/dir/p2/p_test.go", `package p2
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/p2")
tg.runFail("test", "dir/p2")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an xtest.
tg.tempFile("src/dir/p2/p_test.go", `package p2_test
import "../x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/p2")
tg.runFail("test", "dir/p2")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import starting with ./ should not work either.
tg.tempFile("src/dir/d.go", `package dir
import "./x"
var _ = x.X
`)
tg.runFail("build", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/d.go", `package dir
`)
tg.tempFile("src/dir/d_test.go", `package dir
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir")
tg.runFail("test", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an xtest.
tg.tempFile("src/dir/d_test.go", `package dir_test
import "./x"
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir")
tg.runFail("test", "dir")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import plain ".." should not work.
tg.tempFile("src/dir/x/y/y.go", `package dir
import ".."
var _ = x.X
`)
tg.runFail("build", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in a test.
tg.tempFile("src/dir/x/y/y.go", `package y
`)
tg.tempFile("src/dir/x/y/y_test.go", `package y
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x/y")
tg.runFail("test", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// ... even in an x test.
tg.tempFile("src/dir/x/y/y_test.go", `package y_test
import ".."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x/y")
tg.runFail("test", "dir/x/y")
tg.grepStderr("local import.*in non-local package", "did not diagnose local import")
// Relative import "." should not work.
tg.tempFile("src/dir/x/xx.go", `package x
import "."
var _ = x.X
`)
tg.runFail("build", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
// ... even in a test.
tg.tempFile("src/dir/x/xx.go", `package x
`)
tg.tempFile("src/dir/x/xx_test.go", `package x
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x")
tg.runFail("test", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
// ... even in an xtest.
tg.tempFile("src/dir/x/xx.go", `package x
`)
tg.tempFile("src/dir/x/xx_test.go", `package x_test
import "."
import "testing"
var _ = x.X
func TestFoo(t *testing.T) {}
`)
tg.run("build", "dir/x")
tg.runFail("test", "dir/x")
tg.grepStderr("cannot import current directory", "did not diagnose import current directory")
}
func TestGoInstallPkgdir(t *testing.T) {
skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.makeTempdir()
pkg := tg.path(".")
tg.run("install", "-pkgdir", pkg, "sync")
tg.mustExist(filepath.Join(pkg, "sync.a"))
tg.mustNotExist(filepath.Join(pkg, "sync/atomic.a"))
tg.run("install", "-i", "-pkgdir", pkg, "sync")
tg.mustExist(filepath.Join(pkg, "sync.a"))
tg.mustExist(filepath.Join(pkg, "sync/atomic.a"))
}
// For issue 14337.
func TestParallelTest(t *testing.T) {
tooSlow(t)
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.makeTempdir()
const testSrc = `package package_test
import (
"testing"
)
func TestTest(t *testing.T) {
}`
tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1))
tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1))
tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1))
tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1))
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-p=4", "p1", "p2", "p3", "p4")
}
func TestBinaryOnlyPackages(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.path("."))
tg.tempFile("src/p1/p1.go", `//go:binary-only-package
package p1
`)
tg.wantStale("p1", "binary-only packages are no longer supported", "p1 is binary-only, and this message should always be printed")
tg.runFail("install", "p1")
tg.grepStderr("binary-only packages are no longer supported", "did not report attempt to compile binary-only package")
tg.tempFile("src/p1/p1.go", `
package p1
import "fmt"
func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } }
`)
tg.run("install", "p1")
os.Remove(tg.path("src/p1/p1.go"))
tg.mustNotExist(tg.path("src/p1/p1.go"))
tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great
package p2
import "p1"
func F() { p1.F(true) }
`)
tg.runFail("install", "p2")
tg.grepStderr("no Go files", "did not complain about missing sources")
tg.tempFile("src/p1/missing.go", `//go:binary-only-package
package p1
import _ "fmt"
func G()
`)
tg.wantStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)")
tg.runFail("install", "p2")
tg.grepStderr("p1: binary-only packages are no longer supported", "did not report error for binary-only p1")
tg.run("list", "-deps", "-f", "{{.ImportPath}}: {{.BinaryOnly}}", "p2")
tg.grepStdout("p1: true", "p1 not listed as BinaryOnly")
tg.grepStdout("p2: false", "p2 listed as BinaryOnly")
}
// Issue 16050.
func TestAlwaysLinkSysoFiles(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/syso")
tg.tempFile("src/syso/a.syso", ``)
tg.tempFile("src/syso/b.go", `package syso`)
tg.setenv("GOPATH", tg.path("."))
// We should see the .syso file regardless of the setting of
// CGO_ENABLED.
tg.setenv("CGO_ENABLED", "1")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1")
tg.setenv("CGO_ENABLED", "0")
tg.run("list", "-f", "{{.SysoFiles}}", "syso")
tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0")
}
// Issue 16120.
func TestGenerateUsesBuildContext(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("this test won't run under Windows")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src/gen")
tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n")
tg.setenv("GOPATH", tg.path("."))
tg.setenv("GOOS", "linux")
tg.setenv("GOARCH", "amd64")
tg.run("generate", "gen")
tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination")
tg.setenv("GOOS", "darwin")
tg.setenv("GOARCH", "arm64")
tg.run("generate", "gen")
tg.grepStdout("darwin arm64", "unexpected GOOS/GOARCH combination")
}
func TestGoEnv(t *testing.T) {
tg := testgo(t)
tg.parallel()
defer tg.cleanup()
tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors
tg.setenv("GOARCH", "arm")
tg.run("env", "GOARCH")
tg.grepStdout("^arm$", "GOARCH not honored")
tg.run("env", "GCCGO")
tg.grepStdout(".", "GCCGO unexpectedly empty")
tg.run("env", "CGO_CFLAGS")
tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty")
tg.setenv("CGO_CFLAGS", "-foobar")
tg.run("env", "CGO_CFLAGS")
tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored")
tg.setenv("CC", "gcc -fmust -fgo -ffaster")
tg.run("env", "CC")
tg.grepStdout("gcc", "CC not found")
tg.run("env", "GOGCCFLAGS")
tg.grepStdout("-ffaster", "CC arguments not found")
}
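// Regular expressions matching `go test` result lines, with and without the
// "[no tests to run]" note.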
const (
noMatchesPattern = `(?m)^ok.*\[no tests to run\]`
okPattern = `(?m)^ok`
)
// Issue 18044.
func TestLdBindNow(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("LD_BIND_NOW", "1")
tg.run("help")
}
// Issue 18225.
// This is really a cmd/asm issue but this is a convenient place to test it.
func TestConcurrentAsm(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/asm")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
asm := `DATA ·constants<>+0x0(SB)/8,$0
GLOBL ·constants<>(SB),8,$8
`
tg.tempFile("go/src/p/a.s", asm)
tg.tempFile("go/src/p/b.s", asm)
tg.tempFile("go/src/p/p.go", `package p`)
tg.setenv("GOPATH", tg.path("go"))
tg.run("build", "p")
}
// Issue 18975.
func TestFFLAGS(t *testing.T) {
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("p/src/p/main.go", `package main
// #cgo FFLAGS: -no-such-fortran-flag
import "C"
func main() {}
`)
tg.tempFile("p/src/p/a.f", `! comment`)
tg.setenv("GOPATH", tg.path("p"))
// This should normally fail because we are passing an unknown flag,
// but issue #19080 points to Fortran compilers that succeed anyhow.
// To work either way we call doRun directly rather than run or runFail.
tg.doRun([]string{"build", "-x", "p"})
tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`)
}
// Issue 19198.
// This is really a cmd/link issue but this is a convenient place to test it.
func TestDuplicateGlobalAsmSymbols(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/asm")
tooSlow(t)
if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" {
t.Skipf("skipping test on %s", runtime.GOARCH)
}
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
asm := `
#include "textflag.h"
DATA sym<>+0x0(SB)/8,$0
GLOBL sym<>(SB),(NOPTR+RODATA),$8
TEXT ·Data(SB),NOSPLIT,$0
MOVB sym<>(SB), AX
MOVB AX, ret+0(FP)
RET
`
tg.tempFile("go/src/a/a.s", asm)
tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`)
tg.tempFile("go/src/b/b.s", asm)
tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`)
tg.tempFile("go/src/p/p.go", `
package main
import "a"
import "b"
import "C"
func main() {
_ = a.Data() + b.Data()
}
`)
tg.setenv("GOPATH", tg.path("go"))
exe := tg.path("p.exe")
tg.creatingTemp(exe)
tg.run("build", "-o", exe, "p")
}
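// copyFile copies src to dst, creating or truncating dst with the given
// permissions; it returns the copy error if any, otherwise the error from
// closing the destination file.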
func copyFile(src, dst string, perm fs.FileMode) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
_, err = io.Copy(df, sf)
err2 := df.Close()
if err != nil {
return err
}
return err2
}
func TestNeedVersion(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/compile")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("goversion.go", `package main; func main() {}`)
path := tg.path("goversion.go")
tg.setenv("TESTGO_VERSION", "go1.testgo")
tg.runFail("run", path)
tg.grepStderr("compile", "does not match go tool version")
}
func TestBuildmodePIE(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skipf("skipping in -short mode on non-builder")
}
platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
switch platform {
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/riscv64", "linux/s390x",
"android/amd64", "android/arm", "android/arm64", "android/386",
"freebsd/amd64",
"windows/386", "windows/amd64", "windows/arm":
case "darwin/amd64":
default:
t.Skipf("skipping test because buildmode=pie is not supported on %s", platform)
}
t.Run("non-cgo", func(t *testing.T) {
testBuildmodePIE(t, false, true)
})
if canCgo {
switch runtime.GOOS {
case "darwin", "freebsd", "linux", "windows":
t.Run("cgo", func(t *testing.T) {
testBuildmodePIE(t, true, true)
})
}
}
}
func TestWindowsDefaultBuildmodeIsPIE(t *testing.T) {
if testing.Short() && testenv.Builder() == "" {
t.Skipf("skipping in -short mode on non-builder")
}
if runtime.GOOS != "windows" {
t.Skip("skipping windows only test")
}
t.Run("non-cgo", func(t *testing.T) {
testBuildmodePIE(t, false, false)
})
if canCgo {
t.Run("cgo", func(t *testing.T) {
testBuildmodePIE(t, true, false)
})
}
}
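// testBuildmodePIE builds a trivial program (optionally importing "C" and
// optionally passing -buildmode=pie), checks that the resulting binary carries
// the platform's position-independent-executable markers, and then runs it.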
func testBuildmodePIE(t *testing.T, useCgo, setBuildmodeToPIE bool) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
var s string
if useCgo {
s = `import "C";`
}
tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s))
src := tg.path("main.go")
obj := tg.path("main.exe")
args := []string{"build"}
if setBuildmodeToPIE {
args = append(args, "-buildmode=pie")
}
args = append(args, "-o", obj, src)
tg.run(args...)
switch runtime.GOOS {
case "linux", "android", "freebsd":
f, err := elf.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Type != elf.ET_DYN {
t.Errorf("PIE type must be ET_DYN, but %s", f.Type)
}
case "darwin":
f, err := macho.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Flags&macho.FlagDyldLink == 0 {
t.Error("PIE must have DyldLink flag, but not")
}
if f.Flags&macho.FlagPIE == 0 {
t.Error("PIE must have PIE flag, but not")
}
case "windows":
f, err := pe.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
if f.Section(".reloc") == nil {
t.Error(".reloc section is not present")
}
if (f.FileHeader.Characteristics & pe.IMAGE_FILE_RELOCS_STRIPPED) != 0 {
t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set")
}
var dc uint16
switch oh := f.OptionalHeader.(type) {
case *pe.OptionalHeader32:
dc = oh.DllCharacteristics
case *pe.OptionalHeader64:
dc = oh.DllCharacteristics
if (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set")
}
default:
t.Fatalf("unexpected optional header type of %T", f.OptionalHeader)
}
if (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set")
}
if useCgo {
// Test that only one symbol is exported (#40795).
			// PIE binaries don't require an .edata section but unfortunately
			// binutils doesn't generate a .reloc section unless there is
// at least one symbol exported.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
section := f.Section(".edata")
if section == nil {
t.Fatalf(".edata section is not present")
}
// TODO: deduplicate this struct from cmd/link/internal/ld/pe.go
type IMAGE_EXPORT_DIRECTORY struct {
_ [2]uint32
_ [2]uint16
_ [2]uint32
NumberOfFunctions uint32
NumberOfNames uint32
_ [3]uint32
}
var e IMAGE_EXPORT_DIRECTORY
if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil {
t.Fatalf("binary.Read failed: %v", err)
}
// Only _cgo_dummy_export should be exported
if e.NumberOfFunctions != 1 {
t.Fatalf("got %d exported functions; want 1", e.NumberOfFunctions)
}
if e.NumberOfNames != 1 {
t.Fatalf("got %d exported names; want 1", e.NumberOfNames)
}
}
default:
panic("unreachable")
}
out, err := exec.Command(obj).CombinedOutput()
if err != nil {
t.Fatal(err)
}
if string(out) != "hello" {
t.Errorf("got %q; want %q", out, "hello")
}
}
func TestUpxCompression(t *testing.T) {
if runtime.GOOS != "linux" ||
(runtime.GOARCH != "amd64" && runtime.GOARCH != "386") {
t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH)
}
testenv.MustHaveExecPath(t, "upx")
out, err := exec.Command("upx", "--version").CombinedOutput()
if err != nil {
t.Fatalf("upx --version failed: %v", err)
}
// upx --version prints `upx <version>` in the first line of output:
// upx 3.94
// [...]
re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`)
upxVersion := re.FindStringSubmatch(string(out))
if len(upxVersion) != 3 {
t.Fatalf("bad upx version string: %s", upxVersion)
}
major, err1 := strconv.Atoi(upxVersion[1])
minor, err2 := strconv.Atoi(upxVersion[2])
if err1 != nil || err2 != nil {
t.Fatalf("bad upx version string: %s", upxVersion[0])
}
// Anything below 3.94 is known not to work with go binaries
if (major < 3) || (major == 3 && minor < 94) {
t.Skipf("skipping because upx version %v.%v is too old", major, minor)
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`)
src := tg.path("main.go")
obj := tg.path("main")
tg.run("build", "-o", obj, src)
out, err = exec.Command("upx", obj).CombinedOutput()
if err != nil {
t.Logf("executing upx\n%s\n", out)
t.Fatalf("upx failed with %v", err)
}
out, err = exec.Command(obj).CombinedOutput()
if err != nil {
t.Logf("%s", out)
t.Fatalf("running compressed go binary failed with error %s", err)
}
if string(out) != "hello upx" {
t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx")
}
}
func TestCacheListStale(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n")
tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n")
tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n")
tg.setenv("GOPATH", tg.path("gopath"))
tg.run("install", "p", "m")
tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p")
tg.grepStdout("^m false", "m should not be stale")
tg.grepStdout("^q true", "q should be stale")
tg.grepStdout("^p false", "p should not be stale")
}
func TestCacheCoverage(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("c1"))
tg.run("test", "-cover", "-short", "strings")
tg.run("test", "-cover", "-short", "math", "strings")
}
func TestIssue22588(t *testing.T) {
// Don't get confused by stderr coming from tools.
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
if _, err := os.Stat("/usr/bin/time"); err != nil {
t.Skip(err)
}
tg.run("list", "-f={{.Stale}}", "runtime")
tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime")
tg.grepStdout("false", "incorrectly reported runtime as stale")
}
func TestIssue22531(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n")
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after install")
tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
// The link action ID did not include the full main build ID,
// even though the full main build ID is written into the
// eventual binary. That caused the following install to
// be a no-op, thinking the gofmt binary was up-to-date,
// even though .Stale could see it was not.
tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n")
tg.run("install", "-x", "m")
tg.run("list", "-f", "{{.Stale}}", "m")
tg.grepStdout("false", "reported m as stale after reinstall")
tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix))
}
func TestIssue22596(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOCACHE", tg.path("cache"))
tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n")
tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n")
tg.setenv("GOPATH", tg.path("gopath1"))
tg.run("list", "-f={{.Target}}", "p")
target1 := strings.TrimSpace(tg.getStdout())
tg.run("install", "p")
tg.wantNotStale("p", "", "p stale after install")
tg.setenv("GOPATH", tg.path("gopath2"))
tg.run("list", "-f={{.Target}}", "p")
target2 := strings.TrimSpace(tg.getStdout())
tg.must(os.MkdirAll(filepath.Dir(target2), 0777))
tg.must(copyFile(target1, target2, 0666))
tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1")
tg.run("install", "p")
tg.wantNotStale("p", "", "p stale after install2")
}
func TestTestCache(t *testing.T) {
tooSlow(t)
if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") {
t.Skip("GODEBUG gocacheverify")
}
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.setenv("GOCACHE", tg.path("cache"))
// The -p=1 in the commands below just makes the -x output easier to read.
t.Log("\n\nINITIAL\n\n")
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n")
tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n")
tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n")
tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n")
tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}")
tg.run("test", "-x", "-v", "-short", "t/...")
t.Log("\n\nREPEAT\n\n")
tg.run("test", "-x", "-v", "-short", "t/...")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler")
tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test")
t.Log("\n\nCOMMENT\n\n")
// Changing the program text without affecting the compiled package
// should result in the package being rebuilt but nothing more.
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n")
tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1")
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2")
tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3")
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4")
tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler")
tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker")
tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test")
t.Log("\n\nCHANGE\n\n")
// Changing the actual package should have limited effects.
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n")
tg.run("test", "-p=1", "-x", "-v", "-short", "t/...")
// p2 should have been rebuilt.
tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2")
// t1 does not import anything, should not have been rebuilt.
tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1")
tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test")
tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1")
// t2 imports p1 and must be rebuilt and relinked,
// but the change should not have any effect on the test binary,
// so the test should not have been rerun.
tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2")
tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test")
// This check does not currently work with gccgo, as garbage
// collection of unused variables is not turned on by default.
if runtime.Compiler != "gccgo" {
tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2")
}
// t3 imports p1, and changing X changes t3's test binary.
tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3")
tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test")
tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test")
tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result")
// t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled,
// and not rerun.
tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4")
tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test")
// This check does not currently work with gccgo, as garbage
// collection of unused variables is not turned on by default.
if runtime.Compiler != "gccgo" {
tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4")
}
}
func TestTestSkipVetAfterFailedBuild(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("x_test.go", `package x
func f() {
return 1
}
`)
tg.runFail("test", tg.path("x_test.go"))
tg.grepStderrNot(`vet`, "vet should be skipped after the failed build")
}
func TestTestVetRebuild(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
// golang.org/issue/23701.
// b_test imports b with augmented method from export_test.go.
// b_test also imports a, which imports b.
// Must not accidentally see un-augmented b propagate through a to b_test.
tg.tempFile("src/a/a.go", `package a
import "b"
type Type struct{}
func (*Type) M() b.T {return 0}
`)
tg.tempFile("src/b/b.go", `package b
type T int
type I interface {M() T}
`)
tg.tempFile("src/b/export_test.go", `package b
func (*T) Method() *T { return nil }
`)
tg.tempFile("src/b/b_test.go", `package b_test
import (
"testing"
"a"
. "b"
)
func TestBroken(t *testing.T) {
x := new(T)
x.Method()
_ = new(a.Type)
}
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "b")
tg.run("vet", "b")
}
func TestInstallDeps(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.makeTempdir()
tg.setenv("GOPATH", tg.tempdir)
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n")
tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n")
tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n")
tg.run("list", "-f={{.Target}}", "p1")
p1 := strings.TrimSpace(tg.getStdout())
tg.run("list", "-f={{.Target}}", "p2")
p2 := strings.TrimSpace(tg.getStdout())
tg.run("list", "-f={{.Target}}", "main1")
main1 := strings.TrimSpace(tg.getStdout())
tg.run("install", "main1")
tg.mustExist(main1)
tg.mustNotExist(p2)
tg.mustNotExist(p1)
tg.run("install", "p2")
tg.mustExist(p2)
tg.mustNotExist(p1)
// don't let install -i overwrite runtime
tg.wantNotStale("runtime", "", "must be non-stale before install -i")
tg.run("install", "-i", "main1")
tg.mustExist(p1)
tg.must(os.Remove(p1))
tg.run("install", "-i", "p2")
tg.mustExist(p1)
}
// Issue 22986.
func TestImportPath(t *testing.T) {
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `
package main
import (
"log"
p "a/p-1.0"
)
func main() {
if !p.V {
log.Fatal("false")
}
}`)
tg.tempFile("src/a/a_test.go", `
package main_test
import (
p "a/p-1.0"
"testing"
)
func TestV(t *testing.T) {
if !p.V {
t.Fatal("false")
}
}`)
tg.tempFile("src/a/p-1.0/p.go", `
package p
var V = true
func init() {}
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "-o", tg.path("a.exe"), "a")
tg.run("test", "a")
}
func TestBadCommandLines(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.run("build", "x")
tg.tempFile("src/x/@y.go", "package x\n")
tg.runFail("build", "x")
tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go")
tg.must(os.Remove(tg.path("src/x/@y.go")))
tg.tempFile("src/x/-y.go", "package x\n")
tg.runFail("build", "x")
tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go")
tg.must(os.Remove(tg.path("src/x/-y.go")))
if runtime.Compiler == "gccgo" {
tg.runFail("build", "-gccgoflags=all=@x", "x")
} else {
tg.runFail("build", "-gcflags=all=@x", "x")
}
tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec")
tg.tempFile("src/@x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "@x")
tg.grepStderr("invalid input directory name \"@x\"|cannot use path@version syntax", "did not reject @x directory")
tg.tempFile("src/@x/y/y.go", "package y\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "@x/y")
tg.grepStderr("invalid import path \"@x/y\"|cannot use path@version syntax", "did not reject @x/y import path")
tg.tempFile("src/-x/x.go", "package x\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "--", "-x")
tg.grepStderr("invalid import path \"-x\"", "did not reject -x import path")
tg.tempFile("src/-x/y/y.go", "package y\n")
tg.setenv("GOPATH", tg.path("."))
tg.runFail("build", "--", "-x/y")
tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path")
}
func TestTwoPkgConfigs(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
t.Skipf("no shell scripts on %s", runtime.GOOS)
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("src/x/b.go", `package x
// #cgo pkg-config: --static a
import "C"
`)
tg.tempFile("pkg-config.sh", `#!/bin/sh
echo $* >>`+tg.path("pkg-config.out"))
tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755))
tg.setenv("GOPATH", tg.path("."))
tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh"))
tg.run("build", "x")
out, err := ioutil.ReadFile(tg.path("pkg-config.out"))
tg.must(err)
out = bytes.TrimSpace(out)
want := "--cflags --static --static -- a a\n--libs --static --static -- a a"
if !bytes.Equal(out, []byte(want)) {
t.Errorf("got %q want %q", out, want)
}
}
func TestCgoCache(t *testing.T) {
if !canCgo {
t.Skip("no cgo")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/x/a.go", `package main
// #ifndef VAL
// #define VAL 0
// #endif
// int val = VAL;
import "C"
import "fmt"
func main() { fmt.Println(C.val) }
`)
tg.setenv("GOPATH", tg.path("."))
exe := tg.path("x.exe")
tg.run("build", "-o", exe, "x")
tg.setenv("CGO_LDFLAGS", "-lnosuchlibraryexists")
tg.runFail("build", "-o", exe, "x")
tg.grepStderr(`nosuchlibraryexists`, "did not run linker with changed CGO_LDFLAGS")
}
// Issue 23982
func TestFilepathUnderCwdFormat(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.run("test", "-x", "-cover", "log")
tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd")
}
// Issue 24396.
func TestDontReportRemoveOfEmptyDir(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `package a`)
tg.setenv("GOPATH", tg.path("."))
tg.run("install", "-x", "a")
tg.run("install", "-x", "a")
// The second install should have printed only a WORK= line,
// nothing else.
if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 {
t.Error("unnecessary output when installing installed package")
}
}
// Issue 24704.
func TestLinkerTmpDirIsDeleted(t *testing.T) {
skipIfGccgo(t, "gccgo does not use cmd/link")
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("a.go", `package main; import "C"; func main() {}`)
tg.run("build", "-ldflags", "-v", "-o", os.DevNull, tg.path("a.go"))
// Find line that has "host link:" in linker output.
stderr := tg.getStderr()
var hostLinkLine string
for _, line := range strings.Split(stderr, "\n") {
if !strings.Contains(line, "host link:") {
continue
}
hostLinkLine = line
break
}
if hostLinkLine == "" {
		t.Fatal(`failed to find a "host link:" line in linker output`)
}
// Find parameter, like "/tmp/go-link-408556474/go.o" inside of
// "host link:" line, and extract temp directory /tmp/go-link-408556474
// out of it.
tmpdir := hostLinkLine
i := strings.Index(tmpdir, `go.o"`)
if i == -1 {
t.Fatalf(`fail to find "go.o" in "host link:" line %q`, hostLinkLine)
}
tmpdir = tmpdir[:i-1]
i = strings.LastIndex(tmpdir, `"`)
if i == -1 {
t.Fatalf(`fail to find " in "host link:" line %q`, hostLinkLine)
}
tmpdir = tmpdir[i+1:]
// Verify that temp directory has been removed.
_, err := os.Stat(tmpdir)
if err == nil {
t.Fatalf("temp directory %q has not been removed", tmpdir)
}
if !os.IsNotExist(err) {
t.Fatalf("Stat(%q) returns unexpected error: %v", tmpdir, err)
}
}
// Issue 25093.
func TestCoverpkgTestOnly(t *testing.T) {
skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("src/a/a.go", `package a
func F(i int) int {
return i*i
}`)
tg.tempFile("src/atest/a_test.go", `
package a_test
import ( "a"; "testing" )
func TestF(t *testing.T) { a.F(2) }
`)
tg.setenv("GOPATH", tg.path("."))
tg.run("test", "-coverpkg=a", "atest")
tg.grepStderrNot("no packages being tested depend on matches", "bad match message")
tg.grepStdout("coverage: 100", "no coverage")
}
| [
"\"GO_GCFLAGS\"",
"\"PATH\"",
"\"GOPATH\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\"",
"\"GODEBUG\""
]
| []
| [
"GOPATH",
"GO_GCFLAGS",
"GODEBUG",
"PATH"
]
| [] | ["GOPATH", "GO_GCFLAGS", "GODEBUG", "PATH"] | go | 4 | 0 | |
web/security.go | package web
import (
"github.com/l-lin/mr-tracker-api/user"
"github.com/codegangsta/negroni"
sessions "github.com/goincremental/negroni-sessions"
oauth2 "github.com/goincremental/negroni-oauth2"
"os"
"log"
"net/http"
"io/ioutil"
"encoding/json"
"time"
"fmt"
)
const (
SESSION_USER_ID = "user_id"
googleUserInfoEndPoint = "https://www.googleapis.com/oauth2/v1/userinfo"
)
// The user info for Google account
type UserInfo struct {
Id string
Email string
Picture string
}
// Returns a new Negroni middleware using Google OAuth2
func NewOAuth() negroni.Handler {
return oauth2.Google(&oauth2.Config{
ClientID: os.Getenv("GOOGLE_CLIENT_ID"),
ClientSecret: os.Getenv("GOOGLE_CLIENT_SECRET"),
RedirectURL: os.Getenv("GOOGLE_REDIRECT_URI"),
Scopes: []string{"https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/userinfo.profile"},
})
}
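// Typical wiring (illustrative sketch only; the session name, store and router
// values below are assumptions, not part of this package):
//
//	n := negroni.New()
//	n.Use(sessions.Sessions("mr-tracker-session", store))
//	n.Use(NewOAuth())
//	n.UseHandler(router)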
// Wrap the HandlerFunc by checking if the user is indeed authenticated
func WrapWithCheckAuth(handlerFunc http.HandlerFunc) http.HandlerFunc {
return func (w http.ResponseWriter, r *http.Request) {
oauthT := oauth2.GetToken(r)
if oauthT == nil {
reject(w)
} else {
if !oauthT.Valid() {
log.Printf("[-] The oauthToken is not valid")
userId := getUserId(r, saveOrUpdateUser)
u := user.Get(fmt.Sprintf("%v", userId))
if u != nil {
log.Printf("[-] Refreshing the token %s", u.RefreshToken)
if u.Refresh() {
handlerFunc.ServeHTTP(w, r)
}
}
} else {
log.Printf("[-] The oauthToken is valid")
userId := getUserId(r, nil)
u := user.Get(fmt.Sprintf("%v", userId))
u.LastConnection = time.Now()
u.Update()
handlerFunc.ServeHTTP(w, r)
}
}
}
}
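// Typical use (illustrative sketch only; the mux and handler names are assumptions):
//
//	mux.HandleFunc("/api/notifications", WrapWithCheckAuth(getNotifications))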
// Get the user id.
// First fetch it from the session
// If not present, then fetch it from Google service
func getUserId(r *http.Request, callback func (UserInfo, oauth2.Tokens)) string {
s := sessions.GetSession(r)
userId := s.Get(SESSION_USER_ID)
// If userId not found, then fetch the info from Google
if userId == nil {
userInfo, oauthT, err := getUserInfo(r)
if err == nil {
userId = userInfo.Id
s.Set(SESSION_USER_ID, userId)
if callback != nil {
// Save or updating with fresh data of the user
callback(userInfo, oauthT)
}
}
} else {
log.Printf("[-] Updating last connection date for userId %v", userId)
user.UpdateLastConnection(fmt.Sprintf("%v", userId))
}
return fmt.Sprintf("%v", userId)
}
// Reject the request by sending a HTTP 401
func reject(w http.ResponseWriter) {
w.WriteHeader(http.StatusUnauthorized)
if err := json.NewEncoder(w).Encode(JsonErr{Code: http.StatusUnauthorized, Text: "You are not authenticated!"}); err != nil {
log.Fatalf("[x] Error when encoding the json. Reason: %s", err.Error())
}
}
// Save or update the given user info
func saveOrUpdateUser(userInfo UserInfo, oauthT oauth2.Tokens) {
if !user.Exists(userInfo.Id) {
u := user.New()
u.UserId = userInfo.Id
u.Email = userInfo.Email
u.Picture = userInfo.Picture
u.LastConnection = time.Now()
u.RefreshToken = oauthT.Refresh()
log.Printf("[-] Saving user %v", u)
u.Save()
} else {
u := user.Get(userInfo.Id)
u.Email = userInfo.Email
u.Picture = userInfo.Picture
u.LastConnection = time.Now()
if oauthT.Refresh() != "" {
log.Printf("[-] The refresh token is not empty => the user had revoked the permissions")
u.RefreshToken = oauthT.Refresh()
}
log.Printf("[-] Updating the user %v", u)
u.Update()
}
}
// Get the user ID from a given token.
// It will make a GET request to https://www.googleapis.com/oauth2/v1/userinfo?access_token=...
func getUserInfo(r *http.Request) (UserInfo, oauth2.Tokens, error) {
var userInfo UserInfo
oauthT := oauth2.GetToken(r)
	if oauthT == nil || !oauthT.Valid() {
		log.Printf("[x] The user is not authenticated yet!")
		return userInfo, oauthT, fmt.Errorf("user is not authenticated")
	}
accessToken := oauthT.Access()
log.Printf("[-] Getting the user id from access token %s", accessToken)
endPoint := googleUserInfoEndPoint + "?access_token=" + accessToken
resp, err := http.Get(endPoint)
if err != nil {
log.Printf("[x] Could not find the user info with token %s. Reason: %s", accessToken, err.Error())
return userInfo, oauthT, err
}
defer resp.Body.Close()
response, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("[x] Error reading content of %s. Reson: %s", endPoint, err.Error())
return userInfo, oauthT, err
}
err = json.Unmarshal(response, &userInfo)
if err != nil {
log.Printf("[x] Could not unmarshal the user info. Reason: %s", err.Error())
return userInfo, oauthT, err
}
return userInfo, oauthT, nil
}
| [
"\"GOOGLE_CLIENT_ID\"",
"\"GOOGLE_CLIENT_SECRET\"",
"\"GOOGLE_REDIRECT_URI\""
]
| []
| [
"GOOGLE_REDIRECT_URI",
"GOOGLE_CLIENT_ID",
"GOOGLE_CLIENT_SECRET"
]
| [] | ["GOOGLE_REDIRECT_URI", "GOOGLE_CLIENT_ID", "GOOGLE_CLIENT_SECRET"] | go | 3 | 0 | |
cogs/voicemanagement.py | import discord
from discord.ext import commands, tasks
import os
from typing import Dict, Union
from extra import utils
server_id = int(os.getenv('SERVER_ID'))
bots_and_commands_channel_id = int(os.getenv('BOTS_AND_COMMANDS_CHANNEL_ID'))
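# VoiceManagement enforces the camera-on rule in the Video Calls voice channel:
# it reminds members to enable their camera and disconnects those who do not.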
class VoiceManagement(commands.Cog):
def __init__(self, client) -> None:
self.client = client
self.vcc_id: int = int(os.getenv('VOICE_CALLS_CHANNEL_ID'))
# user_id: {'timestamp': 123, 'camera_on': False, 'notified': False}
self.people: Dict[int, Dict[str, Union[int, bool]]] = {}
@commands.Cog.listener()
async def on_ready(self) -> None:
self.check_camera_on.start()
print('VoiceManagement cog is online!')
@tasks.loop(seconds=60)
async def check_camera_on(self) -> None:
""" Checks whether people in the Video Calls channel have their cameras on. """
current_ts = await utils.get_timestamp()
guild = self.client.get_guild(server_id)
bots_and_commands_channel = guild.get_channel(bots_and_commands_channel_id)
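        # Iterate over a copy of the keys because entries may be removed from self.people while looping.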
for user_id in list(self.people.keys()):
secs = current_ts - self.people[user_id]['timestamp']
if secs >= 60 and secs < 180:
if not self.people[user_id]['camera_on'] and not self.people[user_id]['notified']:
# Notifies user to turn on camera
msg = f"**Hey, I saw you are in the `Video Calls` channel and didn't turn on your camera. Please, do it or you will soon get disconnected!**"
try:
member = guild.get_member(user_id)
if not member.voice or not (vc := member.voice.channel):
continue
if self.vcc_id != vc.id:
continue
await member.send(msg)
self.people[user_id]['notified'] = True
except:
await bots_and_commands_channel.send(f"{msg}. {member.mention}")
elif secs >= 180:
if not self.people[user_id]['camera_on']:
del self.people[user_id]
# Disconnects users with cameras off
try:
member = guild.get_member(user_id)
if not member.voice or not (vc := member.voice.channel):
continue
if self.vcc_id != vc.id:
continue
msg = f"**You got disconnected for not turning on your camera in the `Video Calls` voice channel!**"
await member.move_to(None)
await member.send(msg)
except:
await bots_and_commands_channel.send(f"{msg}. {member.mention}")
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):
""" Checks whether people have open cameras in the voice channel. """
if member.bot:
return
# Check voice states
if before.mute != after.mute:
return
        if before.deaf != after.deaf:
return
if before.self_mute != after.self_mute:
return
if before.self_deaf != after.self_deaf:
return
if before.self_stream != after.self_stream:
return
# Get before/after channels and their categories
bc = before.channel
ac = after.channel
current_ts = await utils.get_timestamp()
# Joining the Video Calls channel
if ac and ac.id == self.vcc_id:
self.people[member.id] = {
'timestamp': current_ts,
'camera_on': after.self_video,
'notified': False
}
# Leaving the Video Calls channel
elif not ac or ac.id != self.vcc_id:
self.people.pop(member.id, None)
def setup(client) -> None:
client.add_cog(VoiceManagement(client)) | []
| []
| [
"BOTS_AND_COMMANDS_CHANNEL_ID",
"VOICE_CALLS_CHANNEL_ID",
"SERVER_ID"
]
| [] | ["BOTS_AND_COMMANDS_CHANNEL_ID", "VOICE_CALLS_CHANNEL_ID", "SERVER_ID"] | python | 3 | 0 | |
server/services/store/sqlstore/sqlstore_test.go | package sqlstore
import (
"database/sql"
"os"
"testing"
"github.com/mattermost/focalboard/server/services/mlog"
"github.com/mattermost/focalboard/server/services/store"
"github.com/mattermost/focalboard/server/services/store/storetests"
"github.com/stretchr/testify/require"
)
func SetupTests(t *testing.T) (store.Store, func()) {
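	// Pick the backing store from FB_STORE_TEST_DB_TYPE / FB_STORE_TEST_CONN_STRING, defaulting to an in-memory SQLite database.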
dbType := os.Getenv("FB_STORE_TEST_DB_TYPE")
if dbType == "" {
dbType = sqliteDBType
}
connectionString := os.Getenv("FB_STORE_TEST_CONN_STRING")
if connectionString == "" {
connectionString = ":memory:"
}
logger := mlog.CreateTestLogger(t)
sqlDB, err := sql.Open(dbType, connectionString)
require.NoError(t, err)
err = sqlDB.Ping()
require.NoError(t, err)
store, err := New(dbType, connectionString, "test_", logger, sqlDB)
require.Nil(t, err)
tearDown := func() {
defer func() { _ = logger.Shutdown() }()
err = store.Shutdown()
require.Nil(t, err)
}
return store, tearDown
}
func TestBlocksStore(t *testing.T) {
t.Run("BlocksStore", func(t *testing.T) { storetests.StoreTestBlocksStore(t, SetupTests) })
t.Run("SharingStore", func(t *testing.T) { storetests.StoreTestSharingStore(t, SetupTests) })
t.Run("SystemStore", func(t *testing.T) { storetests.StoreTestSystemStore(t, SetupTests) })
t.Run("UserStore", func(t *testing.T) { storetests.StoreTestUserStore(t, SetupTests) })
}
| [
"\"FB_STORE_TEST_DB_TYPE\"",
"\"FB_STORE_TEST_CONN_STRING\""
]
| []
| [
"FB_STORE_TEST_CONN_STRING",
"FB_STORE_TEST_DB_TYPE"
]
| [] | ["FB_STORE_TEST_CONN_STRING", "FB_STORE_TEST_DB_TYPE"] | go | 2 | 0 | |
components/automate-cli/cmd/chef-automate/migrate_from_v1.go | // Copyright © 2017 Chef Software
package main
import (
"context"
"fmt"
"io"
"os"
"path"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
dc "github.com/chef/automate/api/config/deployment"
api "github.com/chef/automate/api/interservice/deployment"
"github.com/chef/automate/components/automate-cli/pkg/status"
"github.com/chef/automate/components/automate-deployment/pkg/a1stub"
"github.com/chef/automate/components/automate-deployment/pkg/a1upgrade"
"github.com/chef/automate/components/automate-deployment/pkg/airgap"
"github.com/chef/automate/components/automate-deployment/pkg/client"
"github.com/chef/automate/components/automate-deployment/pkg/manifest"
mc "github.com/chef/automate/components/automate-deployment/pkg/manifest/client"
"github.com/chef/automate/lib/version"
)
var migrateFrom1Long = `Migrate an existing Chef Automate v1 deployment to Chef Automate v2.
- <CONFIG_FILE> must be a valid path to a TOML formatted configuration file`
type migrateCmdFlagSet struct {
migrateSkipPreflight bool
deliveryRunningPath string
deliverySecretsPath string
chefServerRunningPath string
migrateTomlPath string
a2ConfigPath string
hartifactsPath string
manifestDir string
channel string
overrideOrigin string
upgradeStrategy string
adminPassword string
selfTestMode bool
pgDumpSeconds int
pgRestoreSeconds int
yes bool
skipBackup bool
fileMoveTimeout int
skipBackupCheck bool
skipDisasterRecoveryCheck bool
skipExternalESCheck bool
skipFIPSCheck bool
skipSAMLCheck bool
skipWorkflowCheck bool
airgap string
// airgapPreflight only applies to the preflight-check migrate-from-v1
// subcommand; we want to reuse all the skip*Check options there but for
// preflight-check airgap is on/off vs. path to airgap bundle for the actual
// migration.
airgapPreflight bool
enableChefServer bool
enableWorkflow bool
}
var migrateCmdFlags = migrateCmdFlagSet{}
var migrateFrom1Cmd = &cobra.Command{
Use: "migrate-from-v1 [/path/to/automate-deploy.toml]",
Short: "Migrate from Chef Automate v1",
Long: migrateFrom1Long,
Args: cobra.MaximumNArgs(3),
RunE: runMigrateFromV1Cmd,
Aliases: []string{"upgrade-from-v1"},
}
var migrateFrom1StatusCmd = &cobra.Command{
Use: "migrate-from-v1-status",
Short: "Watch the status of the migration to Chef Automate 2",
RunE: runMigrationFromV1StatusCmd,
}
var generateCfgCmd = &cobra.Command{
Use: "gen-config",
Short: "Generate a config file",
Long: "Generate a Chef Automate v2 configuration file from Chef Automate v1",
RunE: runGenerateCfgCmd,
}
func init() {
// migrate-from-v1 flags
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.migrateSkipPreflight,
"skip-preflight",
false,
"Deploy regardless of pre-flight conditions")
migrateFrom1Cmd.PersistentFlags().StringVarP(
&migrateCmdFlags.deliverySecretsPath,
"delivery-secrets",
"s",
"/etc/delivery/delivery-secrets.json",
"Path to delivery-secrets.json")
migrateFrom1Cmd.PersistentFlags().StringVarP(
&migrateCmdFlags.deliveryRunningPath,
"delivery-running",
"r",
"/etc/delivery/delivery-running.json",
"Path to delivery-running.json")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.chefServerRunningPath,
"chef-server-running",
"/etc/opscode/chef-server-running.json",
"Path to chef-server-running.json")
migrateFrom1Cmd.PersistentFlags().StringVarP(
&migrateCmdFlags.a2ConfigPath,
"config",
"c",
"",
"Path to an automate-deploy.toml")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.hartifactsPath,
"hartifacts",
"",
"Optional path to cache of local .hart packages")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.overrideOrigin,
"override-origin",
"",
"Optional origin to install local .hart packages from")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.manifestDir,
"manifest-dir",
"",
"Directory of manifest files")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.channel,
"channel",
"",
"Optional channel to use when installing packages from the depot")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.upgradeStrategy,
"upgrade-strategy",
"",
"Optional upgrade strategy to use when configuring the deployment service")
migrateFrom1Cmd.PersistentFlags().IntVar(
&migrateCmdFlags.pgDumpSeconds,
"postgres-dump-wait-seconds",
0,
"Optional timeout for Chef Automate v1 PostgreSQL dump (0 to disable timeout)")
migrateFrom1Cmd.PersistentFlags().IntVar(
&migrateCmdFlags.pgRestoreSeconds,
"postgres-restore-wait-seconds",
0,
"Optional timeout for Chef Automate v1 PostgreSQL restore (0 to disable timeout)")
migrateFrom1Cmd.PersistentFlags().IntVar(
&migrateCmdFlags.fileMoveTimeout,
"file-move-timeout",
0,
"Optional timeout for moving elasticsearch, compliance, and notifications files during Chef Automate v1 migration (0 to disable timeout)")
migrateFrom1Cmd.PersistentFlags().BoolVarP(
&migrateCmdFlags.yes,
"yes",
"y",
false,
"Do not prompt for confirmation; accept defaults and continue")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipBackup,
"skip-backup",
false,
"Optionally skip backup of your Chef Automate v1 installation (default = false)")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.adminPassword,
"admin-password",
"",
"The password for the initial admin user. Auto-generated by default.")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.selfTestMode,
"self-test",
false,
"(DEV ONLY) execute migration against a test harness")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.enableChefServer,
"enable-chef-server",
false,
"Enable integrated Chef Server migration and deployment; only valid for all-in-one topology")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.enableWorkflow,
"enable-workflow",
false,
"Enable integrated Workflow migration and deployment; only valid for all-in-one topology")
// passwords are not validated until the end of the migration, which makes this
	// feature dangerous. But we still want to have it in CI, so we mark it as
// hidden
err := migrateFrom1Cmd.PersistentFlags().MarkHidden("admin-password")
if err != nil {
fmt.Printf("failed configuring cobra: %s\n", err.Error())
panic(":(")
}
// end users don't have any use for self-test, so don't show them
err = migrateFrom1Cmd.PersistentFlags().MarkHidden("self-test")
if err != nil {
fmt.Printf("failed configuring cobra: %s\n", err.Error())
panic(":(")
}
// a1 migration with Workflow Server will be hidden until it is fully completed
err = migrateFrom1Cmd.PersistentFlags().MarkHidden("enable-workflow")
if err != nil {
fmt.Printf("failed configuring cobra: %s\n", err.Error())
panic(":(")
}
// migrate-from-v1 gen-config flags
generateCfgCmd.PersistentFlags().StringVarP(
&migrateCmdFlags.migrateTomlPath,
"out",
"o",
"./automate-migrate.toml",
"Output file")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipBackupCheck,
"skip-backup-check",
false,
"Optionally do not check if your Chef Automate v1 installation has backups configured (default = false)")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipDisasterRecoveryCheck,
"skip-disaster-recovery-check",
false,
"Optionally do not check if your Chef Automate v1 installation has disaster recovery configured (default = false)")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipExternalESCheck,
"skip-external-es-check",
false,
"Optionally do not check if your Chef Automate v1 installation has external Elasticsearch configured (default = false)")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipFIPSCheck,
"skip-fips-check",
false,
"Optionally do not check if your Chef Automate v1 installation has FIPS configured (default = false)")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipSAMLCheck,
"skip-saml-check",
false,
"Optionally do not check if your Chef Automate v1 installation has SAML configured (default = false)")
migrateFrom1Cmd.PersistentFlags().BoolVar(
&migrateCmdFlags.skipWorkflowCheck,
"skip-workflow-check",
false,
"Optionally do not check if your Chef Automate v1 installation has workflow configured (default = false)")
migrateFrom1Cmd.PersistentFlags().StringVar(
&migrateCmdFlags.airgap,
"airgap-bundle",
"",
"Path to an airgap install bundle")
if !isDevMode() {
for _, flagName := range []string{
"override-origin",
"hartifacts",
"manifest-dir",
} {
err := migrateFrom1Cmd.PersistentFlags().MarkHidden(flagName)
if err != nil {
fmt.Printf("failed configuring cobra: %s\n", err.Error())
panic(":(")
}
}
}
migrateFrom1Cmd.AddCommand(generateCfgCmd)
RootCmd.AddCommand(migrateFrom1Cmd)
RootCmd.AddCommand(migrateFrom1StatusCmd)
}
func runMigrateFromV1Cmd(cmd *cobra.Command, args []string) error {
cleanup := func() {
if migrateCmdFlags.selfTestMode {
a1stub.CleanupTestHarness()
}
}
defer cleanup()
if migrateCmdFlags.selfTestMode {
err := a1stub.StartTestHarness()
if err != nil {
return status.Wrap(
err,
status.UpgradeError,
"Starting the self-test harness failed",
)
}
}
// Initialize a new migration:
// * Load the given delivery-running.json and delivery-secrets.json files
// * Generate an A2 Config if an A2 configuration file was not passed.
migration, err := newLocalMigration()
if err != nil {
return err
}
offlineMode := migrateCmdFlags.airgap != ""
manifestPath := ""
if offlineMode {
writer.Title("Installing airgap artifact")
metadata, err := airgap.Unpack(migrateCmdFlags.airgap)
if err != nil {
return status.Annotate(err, status.AirgapUnpackInstallBundleError)
}
manifestPath = api.AirgapManifestPath
pathEnv := os.Getenv("PATH")
// We need to set the PATH here to include hab so that bootstrapping A2 uses that
// hab instead of trying to download it from the internet
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", path.Dir(metadata.HabBinPath), pathEnv))
if err != nil {
return err
}
} else {
manifestPath = migrateCmdFlags.manifestDir
}
manifestProvider := manifest.NewLocalHartManifestProvider(
mc.NewDefaultClient(manifestPath),
migrateCmdFlags.hartifactsPath,
migrateCmdFlags.overrideOrigin)
err = client.A1Upgrade(writer, migration, migrateCmdFlags.yes, manifestProvider, version.BuildTime, offlineMode)
if err != nil && !status.IsStatusError(err) {
return status.Annotate(err, status.UpgradeError)
}
return err
}
func runMigrationFromV1StatusCmd(cmd *cobra.Command, args []string) error {
conn, err := client.Connection(client.DefaultClientTimeout)
if err != nil {
return err
}
stream, err := conn.A1UpgradeStatus(context.Background(), &api.A1UpgradeStatusRequest{})
if err != nil {
return status.Wrap(
err,
status.DeploymentServiceCallError,
"Acquiring migration status failed",
)
}
opts := []a1upgrade.StatusHandlerOpt{
a1upgrade.WithWriter(writer),
}
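	// Fall back to non-TTY output when stdout (file descriptor 1) is not a terminal.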
if !terminal.IsTerminal(1) {
opts = append(opts, a1upgrade.NoTTY())
}
handler := a1upgrade.NewStatusHandler(opts...)
for {
statusMsg, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return status.Wrap(
err,
status.DeploymentServiceCallError,
"Reading message from the migration status stream failed",
)
}
done, err := handler.HandleStatus(statusMsg)
if err != nil {
// The StatusHandler is responsible for printing the error
return status.Wrap(
err,
status.UpgradeError,
"Streaming migration status failed",
)
}
if done {
break
}
}
return nil
}
func runGenerateCfgCmd(cmd *cobra.Command, args []string) error {
migration, err := newLocalMigration()
if err != nil {
return err
}
if err = migration.A2Config.MarshalToTOMLFile(migrateCmdFlags.migrateTomlPath, 0600); err != nil {
return status.Wrap(
err,
status.MarshalError,
"Marshaling configuration to TOML file failed",
)
}
return nil
}
func newLocalMigration() (*a1upgrade.A1Upgrade, error) {
u, err := a1upgrade.NewA1Upgrade(
a1upgrade.WithDeliveryRunning(migrateCmdFlags.deliveryRunningPath),
a1upgrade.WithDeliverySecrets(migrateCmdFlags.deliverySecretsPath),
a1upgrade.WithChefServerRunning(migrateCmdFlags.chefServerRunningPath, migrateCmdFlags.enableChefServer),
a1upgrade.WithA2ConfigPath(migrateCmdFlags.a2ConfigPath,
dc.WithHartifacts(migrateCmdFlags.hartifactsPath),
dc.WithOrigin(migrateCmdFlags.overrideOrigin)),
a1upgrade.WithHartifactsPath(migrateCmdFlags.hartifactsPath),
a1upgrade.WithOverrideOrigin(migrateCmdFlags.overrideOrigin),
a1upgrade.WithManifestDir(migrateCmdFlags.manifestDir),
a1upgrade.WithChannel(migrateCmdFlags.channel),
a1upgrade.WithUpgradeStrategy(migrateCmdFlags.upgradeStrategy),
a1upgrade.WithAdminPassword(migrateCmdFlags.adminPassword),
a1upgrade.SkipUpgradePreflight(migrateCmdFlags.migrateSkipPreflight),
a1upgrade.SetPostgresDumpWait(migrateCmdFlags.pgDumpSeconds),
a1upgrade.SetPostgresRestoreWait(migrateCmdFlags.pgRestoreSeconds),
a1upgrade.SetFileMoveTimeout(migrateCmdFlags.fileMoveTimeout),
a1upgrade.SkipUpgradeBackup(migrateCmdFlags.skipBackup),
a1upgrade.SkipBackupConfiguredCheck(migrateCmdFlags.skipBackupCheck),
a1upgrade.SkipDisasterRecoveryConfiguredCheck(migrateCmdFlags.skipDisasterRecoveryCheck),
a1upgrade.SkipExternalESConfiguredCheck(migrateCmdFlags.skipExternalESCheck),
a1upgrade.SkipFIPSConfiguredCheck(migrateCmdFlags.skipFIPSCheck),
a1upgrade.SkipSAMLConfiguredCheck(migrateCmdFlags.skipSAMLCheck),
a1upgrade.SkipWorkflowConfiguredCheck(migrateCmdFlags.skipWorkflowCheck),
a1upgrade.WithChefServerEnabled(migrateCmdFlags.enableChefServer),
a1upgrade.WithWorkflowEnabled(migrateCmdFlags.enableWorkflow),
)
if err != nil {
return nil, status.Wrap(
err,
status.UpgradeError,
"Creating A1 migrator failed",
)
}
if err := u.GenerateA2ConfigIfNoneProvided(migrateCmdFlags.a2ConfigPath); err != nil {
return nil, status.Wrap(err, status.ConfigError, "Generating Chef Automate configuration failed")
}
return u, nil
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
setup.py | """Setuptools entry point."""
import codecs
import os
import pathlib
from typing import Dict, List
try:
from setuptools import setup # type: ignore
except ImportError:
from distutils.core import setup
package_name = 'lib_ip' # type: str
required = ['lib_ping @ git+https://github.com/bitranox/lib_ping.git',
'lib_platform @ git+https://github.com/bitranox/lib_platform.git',
] # type: List[str]
required_for_tests = list() # type: List[str]
entry_points = dict() # type: Dict[str, str]
def get_version(dist_directory: str) -> str:
with open(str(pathlib.Path(__file__).parent / f'{dist_directory}/version.txt'), mode='r') as version_file:
version = version_file.readline()
return version
def is_travis_deploy() -> bool:
if 'travis_deploy' in os.environ:
if os.environ['travis_deploy'] == 'True':
return True
return False
def strip_links_from_required(l_required: List[str]) -> List[str]:
"""
>>> required = ['lib_regexp @ git+https://github.com/bitranox/lib_regexp.git', 'test']
>>> assert strip_links_from_required(required) == ['lib_regexp', 'test']
"""
l_req_stripped = list() # type: List[str]
for req in l_required:
req_stripped = req.split('@')[0].strip()
l_req_stripped.append(req_stripped)
return l_req_stripped
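# On Travis deploy builds, strip the direct VCS links so the published package depends on plain PyPI names.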
if is_travis_deploy():
required = strip_links_from_required(required)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
path_readme = pathlib.Path(__file__).parent / 'README.rst'
long_description = package_name
if path_readme.exists():
# noinspection PyBroadException
try:
readme_content = codecs.open(str(path_readme), encoding='utf-8').read()
long_description = readme_content
except Exception:
pass
setup(name=package_name,
version=get_version(package_name),
url='https://github.com/bitranox/{package_name}'.format(package_name=package_name),
packages=[package_name],
package_data={package_name: ['version.txt']},
description=package_name,
long_description=long_description,
long_description_content_type='text/x-rst',
author='Robert Nowotny',
author_email='[email protected]',
classifiers=CLASSIFIERS,
entry_points=entry_points,
# minimally needs to run tests - no project requirements here
tests_require=['typing',
'pathlib',
'mypy ; platform_python_implementation != "PyPy" and python_version >= "3.5"',
'pytest',
'pytest-pep8 ; python_version < "3.5"',
'pytest-pycodestyle ; python_version >= "3.5"',
'pytest-mypy ; platform_python_implementation != "PyPy" and python_version >= "3.5"'
] + required_for_tests,
# specify what a project minimally needs to run correctly
install_requires=['typing', 'pathlib'] + required + required_for_tests,
# minimally needs to run the setup script, dependencies needs also to put here for setup.py install test
# dependencies must not be put here for pip install
setup_requires=['typing',
'pathlib',
'pytest-runner']
)
| []
| []
| [
"travis_deploy"
]
| [] | ["travis_deploy"] | python | 1 | 0 | |
worker/mysqlworker/adapter.go | // Copyright 2019 PayPal Inc.
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"database/sql"
"errors"
"fmt"
"os"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/paypal/hera/common"
"github.com/paypal/hera/utility/logger"
"github.com/paypal/hera/worker/shared"
)
type mysqlAdapter struct {
}
func (adapter *mysqlAdapter) MakeSqlParser() (common.SQLParser, error) {
return common.NewRegexSQLParser()
}
// InitDB creates a sql.DB object for the connection to the mysql database, using the "username", "password" and
// "mysql_datasource" parameters
func (adapter *mysqlAdapter) InitDB() (*sql.DB, error) {
user := os.Getenv("username")
pass := os.Getenv("password")
ds := os.Getenv("mysql_datasource")
if user == "" {
return nil, errors.New("Can't get 'username' from env")
}
if pass == "" {
return nil, errors.New("Can't get 'password' from env")
}
if ds == "" {
return nil, errors.New("Can't get 'mysql_datasource' from env")
}
var db *sql.DB
var err error
is_writable := false
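	// Try each "||"-separated datasource in turn until a writable (primary) connection is obtained.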
for idx, curDs := range strings.Split(ds, "||") {
db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@%s", user, pass, curDs))
if err != nil {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, user+" failed to connect to "+curDs+fmt.Sprintf(" %d", idx))
}
continue
}
		is_writable = adapter.Heartbeat(db)
if is_writable {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, user+" connect success "+curDs+fmt.Sprintf(" %d", idx))
}
err = nil
break
} else {
// read only connection
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "recycling, got read-only conn " /*+curDs*/)
}
err = errors.New("cannot use read-only conn "+curDs)
db.Close()
}
}
return db, err
}
// Heartbeat reports whether the connected database is writable (i.e. the connection points at the master).
func (adapter *mysqlAdapter) Heartbeat(db *sql.DB) bool {
ctx, _ /*cancel*/ := context.WithTimeout(context.Background(), 10*time.Second)
writable := false
conn, err := db.Conn(ctx)
if err != nil {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "could not get connection "+err.Error())
}
return writable
}
defer conn.Close()
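	// Only worker processes (logger.LOG_PREFIX beginning with "WORKER ") run the read-only probe; for other roles the connection is treated as non-writable.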
if strings.HasPrefix(os.Getenv("logger.LOG_PREFIX"), "WORKER ") {
stmt, err := conn.PrepareContext(ctx, "select @@global.read_only")
//stmt, err := conn.PrepareContext(ctx, "show variables where variable_name='read_only'")
if err != nil {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "query ro check err ", err.Error())
}
return false
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "ro check err ", err.Error())
}
return false
}
defer rows.Close()
countRows := 0
if rows.Next() {
countRows++
var readOnly int
/*var nom string
rows.Scan(&nom, &readOnly) // */
rows.Scan(&readOnly)
if readOnly == 0 {
writable = true
}
}
// read only connection
if logger.GetLogger().V(logger.Debug) {
logger.GetLogger().Log(logger.Debug, "writable:", writable)
}
}
return writable
}
// UseBindNames return false because the SQL string uses ? for bind parameters
func (adapter *mysqlAdapter) UseBindNames() bool {
return false
}
/**
* @TODO infra.hera.jdbc.HeraResultSetMetaData mysql type to java type map.
*/
var colTypeMap = map[string]int{
"NULL": 0,
"CHAR": 1,
"DECIMAL": 2,
"INT": 3,
"FLOAT": 4,
"BIGINT": 8,
"DOUBLE": 22,
"BINARY": 23,
"VARCHAR": 5,
"BLOB": 113,
"CLOB": 112,
"TEXT": 112,
"DATE": 184,
"TIMESTAMP": 185,
}
func (adapter *mysqlAdapter) GetColTypeMap() map[string]int {
return colTypeMap
}
func (adapter *mysqlAdapter) ProcessError(errToProcess error, workerScope *shared.WorkerScopeType, queryScope *shared.QueryScopeType) {
errStr := errToProcess.Error()
if strings.HasPrefix(errStr, "driver: bad connection") {
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "mysql ProcessError badConnRecycle "+ errStr + " sqlHash:"+ (*queryScope).SqlHash +" Cmd:"+(*queryScope).NsCmd)
}
(*workerScope).Child_shutdown_flag = true
return
}
idx := strings.Index(errStr, ":")
if idx < 0 || idx >= len(errStr) {
return
}
var errno int
fmt.Sscanf(errStr[6:idx],"%d",&errno)
if logger.GetLogger().V(logger.Warning) {
logger.GetLogger().Log(logger.Warning, "mysql ProcessError "+ errStr + " sqlHash:"+ (*queryScope).SqlHash +" Cmd:"+(*queryScope).NsCmd+fmt.Sprintf(" errno:%d",errno))
}
switch (errno) {
case 0: fallthrough // if there isn't a normal error number
case 1153: fallthrough // pkt too large
case 1154: fallthrough // read err fr pipe
case 1155: fallthrough // err fnctl
case 1156: fallthrough // pkt order
case 1157: fallthrough // err uncompress
case 1158: fallthrough // err read
case 1159: fallthrough // read timeout
case 1160: fallthrough // err write
case 1161: fallthrough // write timeout
case 1290: fallthrough // read-only mode
case 1317: fallthrough // query interupt
case 1836: fallthrough // read-only mode
case 1874: fallthrough // innodb read-only
case 1878: // temp file write fail
(*workerScope).Child_shutdown_flag = true
}
}
func (adapter *mysqlAdapter) ProcessResult(colType string, res string) string {
switch colType {
case "DATE":
var day, month, year int
fmt.Sscanf(res, "%d-%d-%d", &year, &month, &day)
return fmt.Sprintf("%02d-%02d-%d %02d:%02d:%02d.000", day, month, year, 0, 0, 0)
case "TIME":
var hour, min, sec int
fmt.Sscanf(res, "%d:%d:%d", &hour, &min, &sec)
return fmt.Sprintf("%02d-%02d-%d %02d:%02d:%02d.000", 0, 0, 0, hour, min, sec)
case "TIMESTAMP", "DATETIME":
var day, month, year, hour, min, sec int
fmt.Sscanf(res, "%d-%d-%d %d:%d:%d", &year, &month, &day, &hour, &min, &sec)
return fmt.Sprintf("%02d-%02d-%d %02d:%02d:%02d.000", day, month, year, hour, min, sec)
default:
return res
}
}
| [
"\"username\"",
"\"password\"",
"\"mysql_datasource\"",
"\"logger.LOG_PREFIX\""
]
| []
| [
"logger.LOG_PREFIX",
"username",
"mysql_datasource",
"password"
]
| [] | ["logger.LOG_PREFIX", "username", "mysql_datasource", "password"] | go | 4 | 0 | |
vendor/github.com/elastic/beats/metricbeat/module/system/filesystem/helper_test.go | // +build !integration
// +build darwin freebsd linux openbsd windows
package filesystem
import (
"os"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
sigar "github.com/elastic/gosigar"
)
func TestFileSystemList(t *testing.T) {
if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
t.Skip("FileSystem test fails on Travis/OSX with i/o error")
}
fss, err := GetFileSystemList()
if err != nil {
t.Fatal("GetFileSystemList", err)
}
assert.True(t, (len(fss) > 0))
for _, fs := range fss {
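		// Skip cdrom filesystems; they typically fail to stat when no media is present.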
if fs.TypeName == "cdrom" {
continue
}
stat, err := GetFileSystemStat(fs)
if os.IsPermission(err) {
continue
}
if assert.NoError(t, err, "filesystem=%v: %v", fs, err) {
assert.True(t, (stat.Total >= 0))
assert.True(t, (stat.Free >= 0))
assert.True(t, (stat.Avail >= 0))
assert.True(t, (stat.Used >= 0))
}
}
}
func TestFilter(t *testing.T) {
in := []sigar.FileSystem{
{SysTypeName: "nfs"},
{SysTypeName: "ext4"},
{SysTypeName: "proc"},
{SysTypeName: "smb"},
}
out := Filter(in, BuildTypeFilter("nfs", "smb", "proc"))
if assert.Len(t, out, 1) {
assert.Equal(t, "ext4", out[0].SysTypeName)
}
}
| [
"\"TRAVIS\""
]
| []
| [
"TRAVIS"
]
| [] | ["TRAVIS"] | go | 1 | 0 | |
pkg/client/cli/command_group.go | package cli
import (
"os"
"strconv"
"github.com/moby/term"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/telepresenceio/telepresence/v2/pkg/client/cli/cliutil"
)
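// userDaemonRunning is set elsewhere in this package once a user daemon is detected; the usage template uses it to warn when the command list may be incomplete.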
var userDaemonRunning = false
var commandGroupMap = make(map[string]cliutil.CommandGroups)
var globalFlagGroups []cliutil.FlagGroup
var deprecatedGlobalFlags *pflag.FlagSet
func init() {
cobra.AddTemplateFunc("commandGroups", func(cmd *cobra.Command) cliutil.CommandGroups {
return commandGroupMap[cmd.Name()]
})
cobra.AddTemplateFunc("globalFlagGroups", func() []cliutil.FlagGroup {
return globalFlagGroups
})
cobra.AddTemplateFunc("userDaemonRunning", func() bool {
return userDaemonRunning
})
cobra.AddTemplateFunc("wrappedFlagUsages", func(flags *pflag.FlagSet) string {
// This is based off of what Docker does (github.com/docker/cli/cli/cobra.go), but is
// adjusted
// 1. to take a pflag.FlagSet instead of a cobra.Command, so that we can have flag groups, and
// 2. to correct for the ways that Docker upsets me.
var cols int
var err error
// Obey COLUMNS if the shell or user sets it. (Docker doesn't do this.)
if cols, err = strconv.Atoi(os.Getenv("COLUMNS")); err == nil {
goto end
}
// Try to detect the size of the stdout file descriptor. (Docker checks stdin, not stdout.)
if ws, err := term.GetWinsize(1); err == nil {
cols = int(ws.Width)
goto end
}
// If stdout is a terminal but we were unable to get its size (I'm not sure how that can
		// happen), then fall back to assuming 80. If stdout isn't a terminal, then we leave cols
// as 0, meaning "don't wrap it". (Docker wraps it even if stdout isn't a terminal.)
if term.IsTerminal(1) {
cols = 80
goto end
}
end:
return flags.FlagUsagesWrapped(cols)
})
}
func setCommandGroups(cmd *cobra.Command, groups cliutil.CommandGroups) {
commandGroupMap[cmd.Name()] = groups
}
// AddCommandGroups adds all the groups in the given CommandGroups to the command, replaces
// the its standard usage template with a template that groups the commands according to that group.
func AddCommandGroups(cmd *cobra.Command, groups cliutil.CommandGroups) {
for _, commands := range groups {
cmd.AddCommand(commands...)
}
setCommandGroups(cmd, groups)
// Set a usage template that is derived from the default but replaces the "Available Commands"
// section with the commandGroups() from the given command
cmd.SetUsageTemplate(`Usage:{{if and (.Runnable) (not .HasAvailableSubCommands)}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands{{- if not userDaemonRunning }} (list may be incomplete because the User Daemon isn't running){{- end}}:
{{- if commandGroups .}}
{{- range $name, $commands := commandGroups .}}
{{$name}}:{{range $commands}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}
{{- else}}
{{- range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}
{{- end}}
{{- end}}
{{- if .HasAvailableLocalFlags}}
Flags:
{{.LocalNonPersistentFlags | wrappedFlagUsages | trimTrailingWhitespaces}}{{end}}{{if true}}
Global Flags:{{range $group := globalFlagGroups}}
{{$group.Name}}:
{{$group.Flags | wrappedFlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.
For complete documentation and quick-start guides, check out our website at https://www.telepresence.io{{end}}
`)
}
| [
"\"COLUMNS\""
]
| []
| [
"COLUMNS"
]
| [] | ["COLUMNS"] | go | 1 | 0 | |
src/main/java/com/mojang/minecraft/Game.java | package com.mojang.minecraft;
import com.mojang.minecraft.gamemode.CreativeGameMode;
import com.mojang.minecraft.gamemode.GameMode;
import com.mojang.minecraft.gui.*;
import com.mojang.minecraft.entities.Item;
import com.mojang.minecraft.level.Level;
import com.mojang.minecraft.level.LevelIO;
import com.mojang.minecraft.level.generator.LevelGenerator;
import com.mojang.minecraft.level.blocks.Block;
import com.mojang.minecraft.entities.mob.Mob;
import com.mojang.minecraft.model.HumanoidModel;
import com.mojang.minecraft.model.ModelManager;
import com.mojang.minecraft.net.NetworkManager;
import com.mojang.minecraft.entities.particle.ParticleManager;
import com.mojang.minecraft.phys.AABB;
import com.mojang.minecraft.player.InputHandlerImpl;
import com.mojang.minecraft.player.Player;
import com.mojang.minecraft.render.Renderer;
import com.mojang.minecraft.render.*;
import com.mojang.minecraft.render.texture.TextureLavaFX;
import com.mojang.minecraft.render.texture.TextureWaterFX;
import com.mojang.minecraft.sound.SoundManager;
import com.mojang.minecraft.sound.SoundPlayer;
import org.lwjgl.BufferUtils;
import org.lwjgl.LWJGLException;
import org.lwjgl.input.Controllers;
import org.lwjgl.input.Cursor;
import org.lwjgl.input.Keyboard;
import org.lwjgl.input.Mouse;
import org.lwjgl.opengl.Display;
import org.lwjgl.opengl.DisplayMode;
import org.lwjgl.opengl.GL11;
import org.lwjgl.util.glu.GLU;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.swing.*;
import java.awt.*;
import java.io.*;
import java.nio.IntBuffer;
public final class Game implements Runnable {
public GameMode gamemode = new CreativeGameMode(this);
public int width;
public int height;
public Level level;
public LevelRenderer levelRenderer;
public Player player;
public ParticleManager particleManager;
public SessionData session = null;
public String host;
public Canvas canvas;
public boolean levelLoaded = false;
public TextureManager textureManager;
public FontRenderer fontRenderer;
public GuiScreen currentScreen = null;
public ProgressBarDisplay progressBar = new ProgressBarDisplay(this);
public Renderer renderer = new Renderer(this);
public LevelIO levelIo;
public SoundManager sound;
public String levelName;
public int levelId;
public Robot robot;
public HUDScreen hud;
public boolean online;
public NetworkManager networkManager;
public SoundPlayer soundPlayer;
public MovingObjectPosition selected;
public GameSettings settings;
public String debug;
public boolean hasMouse;
public boolean raining;
public MainGameLoop loop;
String server;
int port;
private boolean fullscreen = false;
public final Timer timer = new Timer(20.0F);
private Cursor cursor;
private ResourceDownloadThread resourceThread;
private final MinecraftApplet applet;
public Game(Canvas canvas, MinecraftApplet applet, int width, int height, boolean fullscreen) {
levelIo = new LevelIO(progressBar);
sound = new SoundManager();
levelName = null;
levelId = 0;
online = false;
new HumanoidModel(0.0F);
selected = null;
server = null;
port = 0;
debug = "";
hasMouse = false;
raining = false;
try {
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
} catch (Exception e) {
e.printStackTrace();
}
this.applet = applet;
new SleepForeverThread(this);
this.canvas = canvas;
this.width = width;
this.height = height;
this.fullscreen = fullscreen;
if (canvas != null) {
try {
this.robot = new Robot();
return;
} catch (AWTException var8) {
var8.printStackTrace();
}
}
}
static void checkGLError(String message) {
int error = GL11.glGetError();
if (error != 0) {
String var2 = GLU.gluErrorString(error);
System.out.println("########## GL ERROR ##########");
System.out.println("@ " + message);
System.out.println(error + ": " + var2);
System.exit(0);
}
}
public final void setCurrentScreen(GuiScreen screen) {
this.currentScreen = screen;
if (!(currentScreen instanceof ErrorScreen)) {
if (currentScreen != null) {
currentScreen.onClose();
}
if (screen == null && player.health <= 0) {
screen = new GameOverScreen();
}
if (screen != null) {
if (hasMouse) {
player.releaseAllKeys();
hasMouse = false;
if (levelLoaded) {
try {
Mouse.setNativeCursor(null);
} catch (LWJGLException var4) {
var4.printStackTrace();
}
} else {
Mouse.setGrabbed(false);
}
}
int windowWidth = width * 240 / height;
int windowHeight = 240;
screen.open(this, windowWidth, windowHeight);
online = false;
} else {
grabMouse();
}
}
}
public final void shutdown() {
try {
if (soundPlayer != null) {
SoundPlayer var1 = soundPlayer;
soundPlayer.running = false;
}
if (resourceThread != null) {
ResourceDownloadThread var4 = resourceThread;
resourceThread.running = true;
}
} catch (Exception ignored) {
}
if (!levelLoaded) {
try {
LevelIO.save(level, new FileOutputStream(new File("level.dat")));
} catch (Exception var2) {
var2.printStackTrace();
}
}
Mouse.destroy();
Keyboard.destroy();
Display.destroy();
}
public final void run() {
MainGameLoop.renderer = renderer;
MainGameLoop.timer = timer;
MainGameLoop.game = this;
MainGameLoop.running = true;
try {
if (canvas != null) {
Display.setParent(canvas);
} else if (fullscreen) {
Display.setFullscreen(true);
width = Display.getDisplayMode().getWidth();
height = Display.getDisplayMode().getHeight();
} else {
Display.setDisplayMode(new DisplayMode(width, height));
}
Display.setTitle("Minecraft 0.30");
try {
Display.create();
} catch (LWJGLException var57) {
var57.printStackTrace();
try {
Thread.sleep(1000L);
} catch (InterruptedException var56) {
}
Display.create();
}
Keyboard.create();
Mouse.create();
try {
Controllers.create();
} catch (Exception var55) {
var55.printStackTrace();
}
checkGLError("Pre startup");
GL11.glEnable(3553);
GL11.glShadeModel(7425);
GL11.glClearDepth(1.0D);
GL11.glEnable(2929);
GL11.glDepthFunc(515);
GL11.glEnable(3008);
GL11.glAlphaFunc(516, 0.0F);
GL11.glCullFace(1029);
GL11.glMatrixMode(5889);
GL11.glLoadIdentity();
GL11.glMatrixMode(5888);
checkGLError("Startup");
String appName = "minecraftclassic";
String homeDir = System.getProperty("user.home", ".");
String osName = System.getProperty("os.name").toLowerCase();
File appDir;
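            // Resolve the per-OS working/application-data directory for "minecraftclassic".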
switch (OS.GetOperatingSystemAsInt(osName)) {
case 1, 2 -> appDir = new File(homeDir, '.' + appName + '/');
case 3 -> {
String env = System.getenv("APPDATA");
if (env != null) {
appDir = new File(env, "." + appName + '/');
} else {
appDir = new File(homeDir, '.' + appName + '/');
}
}
case 4 -> appDir = new File(homeDir, "Library/Application Support/" + appName);
default -> appDir = new File(homeDir, appName + '/');
}
if (!appDir.exists() && !appDir.mkdirs()) {
throw new RuntimeException("The working directory could not be created: " + appDir);
}
File var2 = appDir;
settings = new GameSettings(this, appDir);
textureManager = new TextureManager(settings);
textureManager.registerAnimation(new TextureLavaFX());
textureManager.registerAnimation(new TextureWaterFX());
fontRenderer = new FontRenderer(settings, "/default.png", textureManager);
IntBuffer var9;
(var9 = BufferUtils.createIntBuffer(256)).clear().limit(256);
levelRenderer = new LevelRenderer(this, textureManager);
Item.initModels();
Mob.modelCache = new ModelManager();
GL11.glViewport(0, 0, width, height);
if (server != null && session != null) {
Level level = new Level();
level.setData(8, 8, 8, new byte[512]);
setLevel(level);
} else {
try {
if (levelName != null) {
loadOnlineLevel(levelName, levelId);
} else if (!levelLoaded) {
Level levelSave = levelIo.load(new FileInputStream(new File(appDir, "level.dat")));
if (levelSave != null) {
setLevel(levelSave);
}
}
} catch (Exception ignored) { }
if (level == null) {
generateLevel(1);
}
}
particleManager = new ParticleManager(level, textureManager);
if (levelLoaded) {
try {
cursor = new Cursor(16, 16, 0, 0, 1, var9, null);
} catch (LWJGLException e) {
e.printStackTrace();
}
}
try {
soundPlayer = new SoundPlayer(settings);
try {
AudioFormat format = new AudioFormat(44100.0F, 16, 2, true, true);
soundPlayer.dataLine = AudioSystem.getSourceDataLine(format);
soundPlayer.dataLine.open(format, 4410);
soundPlayer.dataLine.start();
soundPlayer.running = true;
Thread soundThread = new Thread(soundPlayer);
soundThread.setDaemon(true);
soundThread.setPriority(10);
soundThread.start();
} catch (Exception e) {
e.printStackTrace();
soundPlayer.running = false;
}
resourceThread = new ResourceDownloadThread(var2, this);
resourceThread.start();
} catch (Exception ignored) {}
checkGLError("Post startup");
hud = new HUDScreen(this, width, height);
(new SkinDownloadThread(this)).start();
if (server != null && session != null) {
networkManager = new NetworkManager(this, server, port, this.session.username, this.session.mppass);
}
} catch (Exception e) {
e.printStackTrace();
JOptionPane.showMessageDialog(null, e.toString(), "Failed to start Minecraft", JOptionPane.ERROR_MESSAGE);
return;
}
loop.loop();
}
public final void grabMouse() {
if (!this.hasMouse) {
this.hasMouse = true;
if (this.levelLoaded) {
try {
Mouse.setNativeCursor(this.cursor);
Mouse.setCursorPosition(this.width / 2, this.height / 2);
} catch (LWJGLException var2) {
var2.printStackTrace();
}
                if (this.canvas != null) {
this.canvas.requestFocus();
}
} else {
Mouse.setGrabbed(true);
}
this.setCurrentScreen(null);
MainGameLoop.lastClick = MainGameLoop.ticks + 10000;
}
}
public final void pause() {
if (this.currentScreen == null) {
this.setCurrentScreen(new PauseScreen());
}
}
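    // var1 is the mouse button: 0 = left (break/attack), 1 = right (use/place).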
void onMouseClick(int var1) {
if (var1 != 0 || MainGameLoop.blockHitTime <= 0) {
HeldBlock heldBlock;
if (var1 == 0) {
heldBlock = renderer.heldBlock;
renderer.heldBlock.offset = -1;
heldBlock.moving = true;
}
int var3;
if (var1 == 1 && (var3 = player.inventory.getSelected()) > 0 && gamemode.useItem(player, var3)) {
heldBlock = renderer.heldBlock;
this.renderer.heldBlock.pos = 0.0F;
} else if (selected == null) {
if (var1 == 0 && !(gamemode instanceof CreativeGameMode)) {
MainGameLoop.blockHitTime = 10;
}
} else {
if (selected.entityPos == 1) {
if (var1 == 0) {
selected.entity.hurt(player, 4);
return;
}
} else if (selected.entityPos == 0) {
var3 = selected.x;
int var4 = selected.y;
int var5 = selected.z;
if (var1 != 0) {
if (selected.face == 0) {
--var4;
}
if (this.selected.face == 1) {
++var4;
}
if (this.selected.face == 2) {
--var5;
}
if (this.selected.face == 3) {
++var5;
}
if (this.selected.face == 4) {
--var3;
}
if (this.selected.face == 5) {
++var3;
}
}
Block var6 = Block.blocks[this.level.getTile(var3, var4, var5)];
if (var1 == 0) {
if (var6 != Block.BEDROCK || this.player.userType >= 100) {
this.gamemode.hitBlock(var3, var4, var5);
return;
}
} else {
int var10;
if ((var10 = this.player.inventory.getSelected()) <= 0) {
return;
}
Block var8;
AABB var9;
if (((var8 = Block.blocks[this.level.getTile(var3, var4, var5)]) == null || var8 == Block.WATER || var8 == Block.STATIONARY_WATER || var8 == Block.LAVA || var8 == Block.STATIONARY_LAVA) && ((var9 = Block.blocks[var10].getCollisionBox(var3, var4, var5)) == null || (!this.player.bb.intersects(var9) && this.level.isFree(var9)))) {
if (!this.gamemode.canPlace(var10)) {
return;
}
if (this.isOnline()) {
this.networkManager.sendBlockChange(var3, var4, var5, var1, var10);
}
this.level.netSetTile(var3, var4, var5, var10);
heldBlock = this.renderer.heldBlock;
this.renderer.heldBlock.pos = 0.0F;
Block.blocks[var10].onPlace(this.level, var3, var4, var5);
}
}
}
}
}
}
public final boolean isOnline() {
return this.networkManager != null;
}
public final void generateLevel(int var1) {
String var2 = this.session != null ? this.session.username : "anonymous";
Level var4 = (new LevelGenerator(this.progressBar)).generate(var2, 128 << var1, 128 << var1, 64);
this.gamemode.prepareLevel(var4);
this.setLevel(var4);
}
public final boolean loadOnlineLevel(String levelName, int id) {
Level level = levelIo.loadOnline(host, levelName, id);
if (level == null) {
return false;
} else {
this.setLevel(level);
return true;
}
}
public final void setLevel(Level level) {
if (this.applet == null || !this.applet.getDocumentBase().getHost().equalsIgnoreCase("minecraft.net") && !this.applet.getDocumentBase().getHost().equalsIgnoreCase("www.minecraft.net") || !this.applet.getCodeBase().getHost().equalsIgnoreCase("minecraft.net") && !this.applet.getCodeBase().getHost().equalsIgnoreCase("www.minecraft.net")) {
level = null;
}
this.level = level;
if (level != null) {
level.initTransient();
this.gamemode.apply(level);
level.font = this.fontRenderer;
level.rendererContext$5cd64a7f = this;
if (!this.isOnline()) {
this.player = (Player) level.findSubclassOf(Player.class);
} else if (this.player != null) {
this.player.resetPos();
this.gamemode.preparePlayer(this.player);
level.player = this.player;
level.addEntity(this.player);
}
}
if (this.player == null) {
this.player = new Player(level);
this.player.resetPos();
this.gamemode.preparePlayer(this.player);
if (level != null) {
level.player = this.player;
}
}
if (this.player != null) {
this.player.input = new InputHandlerImpl(this.settings);
this.gamemode.apply(this.player);
}
if (this.levelRenderer != null) {
LevelRenderer var3 = this.levelRenderer;
if (this.levelRenderer.level != null) {
var3.level.removeListener(var3);
}
var3.level = level;
if (level != null) {
level.addListener(var3);
var3.refresh();
}
}
if (this.particleManager != null) {
ParticleManager var5 = this.particleManager;
if (level != null) {
level.particleEngine = var5;
}
for (int var4 = 0; var4 < 2; ++var4) {
var5.particles[var4].clear();
}
}
System.gc();
}
}
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | java | 1 | 0 | |
catfood/wsgi.py | """
WSGI config for project catfood.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'catfood.settings.local')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/portworx/sched-ops/k8s/core/core.go | package core
import (
"fmt"
"os"
"sync"
"time"
"github.com/portworx/sched-ops/task"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
masterLabelKey = "node-role.kubernetes.io/master"
pvcStorageProvisionerKey = "volume.beta.kubernetes.io/storage-provisioner"
labelUpdateMaxRetries = 5
)
var (
instance Ops
once sync.Once
deleteForegroundPolicy = metav1.DeletePropagationForeground
)
// Ops is an interface to perform kubernetes related operations on the core resources.
type Ops interface {
ConfigMapOps
EventOps
NamespaceOps
NodeOps
PersistentVolumeClaimOps
PodOps
SecretOps
ServiceOps
ServiceAccountOps
// SetConfig sets the config and resets the client
SetConfig(config *rest.Config)
// GetVersion gets the version from the kubernetes cluster
GetVersion() (*version.Info, error)
// ResourceExists returns true if given resource type exists in kubernetes API server
ResourceExists(schema.GroupVersionKind) (bool, error)
}
// Instance returns a singleton instance of the client.
func Instance() Ops {
once.Do(func() {
if instance == nil {
instance = &Client{}
}
})
return instance
}
// SetInstance replaces the instance with the provided one. Should be used only for testing purposes.
func SetInstance(i Ops) {
instance = i
}
// New builds a new client.
func New(kubernetes kubernetes.Interface, core corev1client.CoreV1Interface, storage storagev1client.StorageV1Interface) *Client {
return &Client{
kubernetes: kubernetes,
core: core,
storage: storage,
}
}
// NewForConfig builds a new client for the given config.
func NewForConfig(c *rest.Config) (*Client, error) {
kubernetes, err := kubernetes.NewForConfig(c)
if err != nil {
return nil, err
}
core, err := corev1client.NewForConfig(c)
if err != nil {
return nil, err
}
storage, err := storagev1client.NewForConfig(c)
if err != nil {
return nil, err
}
return &Client{
kubernetes: kubernetes,
core: core,
storage: storage,
}, nil
}
// NewInstanceFromConfigFile returns new instance of client by using given
// config file
func NewInstanceFromConfigFile(config string) (Ops, error) {
newInstance := &Client{}
err := newInstance.loadClientFromKubeconfig(config)
if err != nil {
return nil, err
}
return newInstance, nil
}
// Client is a wrapper for kubernetes core client.
type Client struct {
config *rest.Config
core corev1client.CoreV1Interface
storage storagev1client.StorageV1Interface
kubernetes kubernetes.Interface
}
// SetConfig sets the config and resets the client.
func (c *Client) SetConfig(cfg *rest.Config) {
c.config = cfg
c.core = nil
c.storage = nil
c.kubernetes = nil
}
// GetVersion returns server version
func (c *Client) GetVersion() (*version.Info, error) {
if err := c.initClient(); err != nil {
return nil, err
}
return c.kubernetes.Discovery().ServerVersion()
}
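// ResourceExists returns true if the given resource type exists on the kubernetes API server.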
func (c *Client) ResourceExists(gvk schema.GroupVersionKind) (bool, error) {
if err := c.initClient(); err != nil {
return false, err
}
_, apiLists, err := c.kubernetes.Discovery().ServerGroupsAndResources()
if err != nil {
return false, err
}
for _, apiList := range apiLists {
if apiList.GroupVersion == gvk.GroupVersion().String() {
for _, r := range apiList.APIResources {
if r.Kind == gvk.Kind {
return true, nil
}
}
}
}
return false, nil
}
// initClient initializes the k8s client if it is uninitialized
func (c *Client) initClient() error {
if c.core != nil && c.storage != nil {
return nil
}
return c.setClient()
}
// setClient instantiates a client.
func (c *Client) setClient() error {
var err error
if c.config != nil {
err = c.loadClient()
} else {
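		// Prefer an explicit KUBECONFIG file; otherwise fall back to the in-cluster service account.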
kubeconfig := os.Getenv("KUBECONFIG")
if len(kubeconfig) > 0 {
err = c.loadClientFromKubeconfig(kubeconfig)
} else {
err = c.loadClientFromServiceAccount()
}
}
return err
}
// loadClientFromServiceAccount loads a k8s client from a ServiceAccount specified in the pod running px
func (c *Client) loadClientFromServiceAccount() error {
config, err := rest.InClusterConfig()
if err != nil {
return err
}
c.config = config
return c.loadClient()
}
func (c *Client) loadClientFromKubeconfig(kubeconfig string) error {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return err
}
c.config = config
return c.loadClient()
}
func (c *Client) loadClient() error {
if c.config == nil {
return fmt.Errorf("rest config is not provided")
}
var err error
c.kubernetes, err = kubernetes.NewForConfig(c.config)
if err != nil {
return err
}
c.core, err = corev1client.NewForConfig(c.config)
if err != nil {
return err
}
c.storage, err = storagev1client.NewForConfig(c.config)
if err != nil {
return err
}
return nil
}
// WatchFunc is a callback provided to the Watch functions
// which is invoked when the given object is changed.
type WatchFunc func(object runtime.Object) error
// handleWatch is an internal function that handles the watch. On channel shutdown (i.e. stop watch),
// it'll attempt to reestablish its watch function.
func (c *Client) handleWatch(
watchInterface watch.Interface,
object runtime.Object,
namespace string,
fn WatchFunc,
listOptions metav1.ListOptions) {
defer watchInterface.Stop()
for {
select {
case event, more := <-watchInterface.ResultChan():
if !more {
logrus.Debug("Kubernetes watch closed (attempting to re-establish)")
t := func() (interface{}, bool, error) {
var err error
if node, ok := object.(*corev1.Node); ok {
err = c.WatchNode(node, fn)
} else if cm, ok := object.(*corev1.ConfigMap); ok {
err = c.WatchConfigMap(cm, fn)
} else if _, ok := object.(*corev1.Pod); ok {
err = c.WatchPods(namespace, fn, listOptions)
} else {
return "", false, fmt.Errorf("unsupported object: %v given to handle watch", object)
}
return "", true, err
}
if _, err := task.DoRetryWithTimeout(t, 10*time.Minute, 10*time.Second); err != nil {
logrus.WithError(err).Error("Could not re-establish the watch")
} else {
logrus.Debug("watch re-established")
}
return
}
fn(event.Object)
}
}
}
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
example-files/scope.py | #!/usr/bin/env python3
def one(word):
print('Jenny {} to the street.'.format(word))
def two(word):
    print('We used the {} to fasten the two pieces of wood together.'.format(word))
def three(word):
print('He was struck by a {} of lightning'.format(word))
def context():
item = 'bolt'
one(word=item)
_ = input('what does "{}" mean in this context?'.format(item))
two(word=item)
_ = input('what does "{}" mean in this context?'.format(item))
three(word=item)
_ = input('what does "{}" mean in this context?'.format(item))
context()
| []
| []
| []
| [] | [] | python | null | null | null |
auth/config.py | import os
# Create dummy secrey key so we can use sessions
SECRET_KEY = 'c5282ad3ea38420ab8ac0326a48d3a8c'
# Database connection URI, read from the DATABASE_URL environment variable
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# Flask-Security config
SECURITY_URL_PREFIX = "/admin"
# SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_SALT = 'bf9797d59d094abb92fdca167494a7ee'
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_REGISTER_URL = "/register/"
SECURITY_POST_LOGIN_VIEW = "/admin/"
SECURITY_POST_LOGOUT_VIEW = "/admin/"
SECURITY_POST_REGISTER_VIEW = "/admin/"
# Flask-Security features
SECURITY_REGISTERABLE = True
SECURITY_SEND_REGISTER_EMAIL = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
vendor/github.com/hashicorp/vault/command/auth.go | package command
import (
"bufio"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/kv-builder"
"github.com/hashicorp/vault/helper/password"
"github.com/hashicorp/vault/meta"
"github.com/mitchellh/mapstructure"
"github.com/ryanuber/columnize"
)
// AuthHandler is the interface that any auth handlers must implement
// to enable auth via the CLI.
type AuthHandler interface {
Auth(*api.Client, map[string]string) (string, error)
Help() string
}
// AuthCommand is a Command that handles authentication.
type AuthCommand struct {
meta.Meta
Handlers map[string]AuthHandler
// The fields below can be overwritten for tests
testStdin io.Reader
}
func (c *AuthCommand) Run(args []string) int {
var method, authPath string
var methods, methodHelp, noVerify bool
flags := c.Meta.FlagSet("auth", meta.FlagSetDefault)
flags.BoolVar(&methods, "methods", false, "")
flags.BoolVar(&methodHelp, "method-help", false, "")
flags.BoolVar(&noVerify, "no-verify", false, "")
flags.StringVar(&method, "method", "", "method")
flags.StringVar(&authPath, "path", "", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
if methods {
return c.listMethods()
}
args = flags.Args()
tokenHelper, err := c.TokenHelper()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing token helper: %s\n\n"+
"Please verify that the token helper is available and properly\n"+
"configured for your system. Please refer to the documentation\n"+
"on token helpers for more information.",
err))
return 1
}
// token is where the final token will go
handler := c.Handlers[method]
// Read token from stdin if first arg is exactly "-"
var stdin io.Reader = os.Stdin
if c.testStdin != nil {
stdin = c.testStdin
}
if len(args) > 0 && args[0] == "-" {
stdinR := bufio.NewReader(stdin)
args[0], err = stdinR.ReadString('\n')
if err != nil && err != io.EOF {
c.Ui.Error(fmt.Sprintf("Error reading from stdin: %s", err))
return 1
}
args[0] = strings.TrimSpace(args[0])
}
if method == "" {
token := ""
if len(args) > 0 {
token = args[0]
}
handler = &tokenAuthHandler{Token: token}
args = nil
switch authPath {
case "", "auth/token":
default:
c.Ui.Error("Token authentication does not support custom paths")
return 1
}
}
if handler == nil {
methods := make([]string, 0, len(c.Handlers))
for k := range c.Handlers {
methods = append(methods, k)
}
sort.Strings(methods)
c.Ui.Error(fmt.Sprintf(
"Unknown authentication method: %s\n\n"+
"Please use a supported authentication method. The list of supported\n"+
"authentication methods is shown below. Note that this list may not\n"+
"be exhaustive: Vault may support other auth methods. For auth methods\n"+
"unsupported by the CLI, please use the HTTP API.\n\n"+
"%s",
method,
strings.Join(methods, ", ")))
return 1
}
if methodHelp {
c.Ui.Output(handler.Help())
return 0
}
// Warn if the VAULT_TOKEN environment variable is set, as that will take
// precedence
if os.Getenv("VAULT_TOKEN") != "" {
c.Ui.Output("==> WARNING: VAULT_TOKEN environment variable set!\n")
c.Ui.Output(" The environment variable takes precedence over the value")
c.Ui.Output(" set by the auth command. Either update the value of the")
c.Ui.Output(" environment variable or unset it to use the new token.\n")
}
var vars map[string]string
if len(args) > 0 {
builder := kvbuilder.Builder{Stdin: os.Stdin}
if err := builder.Add(args...); err != nil {
c.Ui.Error(err.Error())
return 1
}
if err := mapstructure.Decode(builder.Map(), &vars); err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing options: %s", err))
return 1
}
} else {
vars = make(map[string]string)
}
// Build the client so we can auth
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client to auth: %s", err))
return 1
}
if authPath != "" {
vars["mount"] = authPath
}
// Authenticate
token, err := handler.Auth(client, vars)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
// Cache the previous token so that it can be restored if authentication fails
var previousToken string
if previousToken, err = tokenHelper.Get(); err != nil {
c.Ui.Error(fmt.Sprintf("Error caching the previous token: %s\n\n", err))
return 1
}
// Store the token!
if err := tokenHelper.Store(token); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error storing token: %s\n\n"+
"Authentication was not successful and did not persist.\n"+
"Please reauthenticate, or fix the issue above if possible.",
err))
return 1
}
if noVerify {
c.Ui.Output(fmt.Sprintf(
"Authenticated - no token verification has been performed.",
))
return 0
}
// Build the client again so it can read the token we just wrote
client, err = c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client to verify the token: %s", err))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
// Verify the token
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error validating token: %s", err))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
if secret == nil {
c.Ui.Error(fmt.Sprintf("Error: Invalid token"))
if err := tokenHelper.Store(previousToken); err != nil {
c.Ui.Error(fmt.Sprintf(
"Error restoring the previous token: %s\n\n"+
"Please reauthenticate with a valid token.",
err))
}
return 1
}
// Get the policies we have
policiesRaw, ok := secret.Data["policies"]
if !ok {
policiesRaw = []string{"unknown"}
}
var policies []string
for _, v := range policiesRaw.([]interface{}) {
policies = append(policies, v.(string))
}
output := "Successfully authenticated! You are now logged in."
if method != "" {
output += "\nThe token below is already saved in the session. You do not"
output += "\nneed to \"vault auth\" again with the token."
}
output += fmt.Sprintf("\ntoken: %s", secret.Data["id"])
output += fmt.Sprintf("\ntoken_duration: %s", secret.Data["ttl"].(json.Number).String())
if len(policies) > 0 {
output += fmt.Sprintf("\ntoken_policies: %v", policies)
}
c.Ui.Output(output)
return 0
}
func (c *AuthCommand) listMethods() int {
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing client: %s", err))
return 1
}
auth, err := client.Sys().ListAuth()
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error reading auth table: %s", err))
return 1
}
paths := make([]string, 0, len(auth))
for path := range auth {
paths = append(paths, path)
}
sort.Strings(paths)
columns := []string{"Path | Type | Default TTL | Max TTL | Description"}
for _, path := range paths {
auth := auth[path]
defTTL := "system"
if auth.Config.DefaultLeaseTTL != 0 {
defTTL = strconv.Itoa(auth.Config.DefaultLeaseTTL)
}
maxTTL := "system"
if auth.Config.MaxLeaseTTL != 0 {
maxTTL = strconv.Itoa(auth.Config.MaxLeaseTTL)
}
columns = append(columns, fmt.Sprintf(
"%s | %s | %s | %s | %s", path, auth.Type, defTTL, maxTTL, auth.Description))
}
c.Ui.Output(columnize.SimpleFormat(columns))
return 0
}
func (c *AuthCommand) Synopsis() string {
return "Prints information about how to authenticate with Vault"
}
func (c *AuthCommand) Help() string {
helpText := `
Usage: vault auth [options] [auth-information]
Authenticate with Vault with the given token or via any supported
authentication backend.
By default, the -method is assumed to be token. If not supplied via the
command-line, a prompt for input will be shown. If the authentication
information is "-", it will be read from stdin.
The -method option allows alternative authentication methods to be used,
such as userpass, GitHub, or TLS certificates. For these, additional
values as "key=value" pairs may be required. For example, to authenticate
to the userpass auth backend:
$ vault auth -method=userpass username=my-username
Use "-method-help" to get help for a specific method.
If an auth backend is enabled at a different path, the "-method" flag
should still point to the canonical name, and the "-path" flag should be
used. If a GitHub auth backend was mounted as "github-private", one would
authenticate to this backend via:
$ vault auth -method=github -path=github-private
The value of the "-path" flag is supplied to auth providers as the "mount"
option in the payload to specify the mount point.
General Options:
` + meta.GeneralOptionsUsage() + `
Auth Options:
-method=name Outputs help for the authentication method with the given
name for the remote server. If this authentication method
is not available, exit with code 1.
-method-help If set, the help for the selected method will be shown.
-methods List the available auth methods.
-no-verify Do not verify the token after creation; avoids a use count
decrement.
-path The path at which the auth backend is enabled. If an auth
backend is mounted at multiple paths, this option can be
used to authenticate against specific paths.
`
return strings.TrimSpace(helpText)
}
// tokenAuthHandler handles retrieving the token from the command-line.
type tokenAuthHandler struct {
Token string
}
func (h *tokenAuthHandler) Auth(*api.Client, map[string]string) (string, error) {
token := h.Token
if token == "" {
var err error
// No arguments given, read the token from user input
fmt.Printf("Token (will be hidden): ")
token, err = password.Read(os.Stdin)
fmt.Printf("\n")
if err != nil {
return "", fmt.Errorf(
"Error attempting to ask for token. The raw error message\n"+
"is shown below, but the most common reason for this error is\n"+
"that you attempted to pipe a value into auth. If you want to\n"+
"pipe the token, please pass '-' as the token argument.\n\n"+
"Raw error: %s", err)
}
}
if token == "" {
return "", fmt.Errorf(
"A token must be passed to auth. Please view the help\n" +
"for more information.")
}
return token, nil
}
func (h *tokenAuthHandler) Help() string {
help := `
No method selected with the "-method" flag, so the "auth" command assumes
you'll be using raw token authentication. For this, specify the token to
authenticate as, as the parameter to "vault auth". Example:
vault auth 123456
The token used to authenticate must come from some other source. A root
token is created when Vault is first initialized. After that, subsequent
tokens are created via the API or command line interface (with the
"token"-prefixed commands).
`
return strings.TrimSpace(help)
}
| [
"\"VAULT_TOKEN\""
]
| []
| [
"VAULT_TOKEN"
]
| [] | ["VAULT_TOKEN"] | go | 1 | 0 | |
train_unet.py | '''
-----------------------------------
TRAINING CODE - SHIFTVARCONV + UNET
-----------------------------------
'''
import os
import numpy as np
import torch
import torch.nn as nn
import logging
import glob
import argparse
import time
from torch.utils import data
## set random seed
torch.manual_seed(12)
np.random.seed(12)
from logger import Logger
from dataloader import Dataset_load
from sensor import C2B
from unet import UNet
import utils
## parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--expt', type=str, required=True, help='expt name')
parser.add_argument('--epochs', type=int, default=500, help='num epochs to train')
parser.add_argument('--batch', type=int, required=True, help='batch size for training and validation')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--blocksize', type=int, default=8, help='tile size for the exposure code (default 8)')
parser.add_argument('--subframes', type=int, default=16, help='num sub frames')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to load')
parser.add_argument('--mask', type=str, default='random', help='"impulse" or "random" or "opt"')
parser.add_argument('--two_bucket', action='store_true', help='1 bucket or 2 buckets')
parser.add_argument('--gpu', type=str, required=True, help='GPU ID')
args = parser.parse_args()
# print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
## params for DataLoader
train_params = {'batch_size': args.batch,
'shuffle': True,
'num_workers': 20,
'pin_memory': True}
val_params = {'batch_size': args.batch,
'shuffle': False,
'num_workers': 20,
'pin_memory': True}
lr = args.lr
num_epochs = args.epochs
save_path = os.path.join('/data/prasan/anupama/', args.expt)
utils.create_dirs(save_path)
## tensorboard summary logger
logger = Logger(os.path.join(save_path, 'logs'))
## configure runtime logging
logging.basicConfig(level=logging.INFO,
filename=os.path.join(save_path, 'logs', 'logfile.log'),
format='%(asctime)s - %(message)s',
filemode='w' if not args.ckpt else 'a')
# logger=logging.getLogger()#.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logging.getLogger('').addHandler(console)
logging.info(args)
## dataloaders using hdf5 file
# data_path = '/data/prasan/anupama/dataset/GoPro_patches_ds2_s16-8_p64-32.hdf5'
data_path = '/data/prasan/anupama/dataset/GoPro_patches_ds2_s7-7_p64-32.hdf5'
## initializing training and validation data generators
training_set = Dataset_load(data_path, dataset='train', num_samples='all')
training_generator = data.DataLoader(training_set, **train_params)
logging.info('Loaded training set: %d videos'%(len(training_set)))
validation_set = Dataset_load(data_path, dataset='test', num_samples=60000)
validation_generator = data.DataLoader(validation_set, **val_params)
logging.info('Loaded validation set: %d videos'%(len(validation_set)))
## initialize nets
# c2b = C2B(block_size=args.blocksize, sub_frames=args.subframes, mask=args.mask, two_bucket=args.two_bucket).cuda()
if not args.two_bucket:
uNet = UNet(in_channel=1, out_channel=args.subframes, instance_norm=False).cuda()
else:
uNet = UNet(in_channel=2, out_channel=args.subframes, instance_norm=False).cuda()
# uNet = UNet(n_channels=1, n_classes=16).cuda()
## optimizer
optimizer = torch.optim.Adam(list(uNet.parameters()), lr=lr, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.9,
patience=5, min_lr=1e-6, verbose=True)
## load checkpoint
if args.ckpt is None:
start_epoch = 0
logging.info('No checkpoint, initialized net')
else:
ckpt = torch.load(os.path.join(save_path, 'model', args.ckpt))
# c2b.load_state_dict(ckpt['c2b_state_dict'])
uNet.load_state_dict(ckpt['unet_state_dict'])
optimizer.load_state_dict(ckpt['opt_state_dict'])
start_epoch = ckpt['epoch'] + 1
uNet.train()
logging.info('Loaded checkpoint from epoch %d'%(start_epoch-1))
# torch.save(c2b.code, os.path.join(save_path, 'model', 'exposure_code.pth'))
logging.info('Starting training')
for i in range(start_epoch, start_epoch+num_epochs):
## TRAINING
train_iter = 0
final_loss_sum = 0.
tv_loss_sum = 0.
loss_sum = 0.
psnr_sum = 0.
for gt_vid in training_generator:
gt_vid = gt_vid.cuda()
if not args.two_bucket:
# b1 = c2b(gt_vid) # (N,1,H,W)
b1 = torch.mean(gt_vid, dim=1, keepdim=True)
# interm_vid = utils.impulse_inverse(b1, block_size=args.blocksize)
# assert interm_vid.shape == gt_vid.shape
highres_vid = uNet(b1) # (N,16,H,W)
else:
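# NOTE: the two-bucket branch below calls c2b, which is only defined if the commented-out
# C2B sensor above is re-enabled; as written, running with --two_bucket would raise a NameError.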
b1, b0 = c2b(gt_vid)
b_stack = torch.cat([b1,b0], dim=1)
highres_vid = uNet(b_stack)
psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()
## LOSSES
final_loss = utils.weighted_L1loss(highres_vid, gt_vid)
final_loss_sum += final_loss.item()
tv_loss = utils.gradx(highres_vid).abs().mean() + utils.grady(highres_vid).abs().mean()
tv_loss_sum += tv_loss.item()
loss = final_loss + 0.1*tv_loss
loss_sum += loss.item()
## BACKPROP
optimizer.zero_grad()
loss.backward()
optimizer.step()
if train_iter % 1000 == 0:
logging.info('epoch: %3d \t iter: %5d \t loss: %.4f'%(i, train_iter, loss.item()))
train_iter += 1
logging.info('Total train iterations: %d'%(train_iter))
logging.info('Finished epoch %3d with loss: %.4f psnr: %.4f'
%(i, loss_sum/train_iter, psnr_sum/len(training_set)))
## dump tensorboard summaries
logger.scalar_summary(tag='training/loss', value=loss_sum/train_iter, step=i)
logger.scalar_summary(tag='training/final_loss', value=final_loss_sum/train_iter, step=i)
logger.scalar_summary(tag='training/tv_loss', value=tv_loss_sum/train_iter, step=i)
logger.scalar_summary(tag='training/psnr', value=psnr_sum/len(training_set), step=i)
logging.info('Dumped tensorboard summaries for epoch %4d'%(i))
## VALIDATION
if ((i+1) % 2 == 0) or ((i+1) == (start_epoch+num_epochs)):
logging.info('Starting validation')
val_iter = 0
val_loss_sum = 0.
val_psnr_sum = 0.
val_ssim_sum = 0.
uNet.eval()
with torch.no_grad():
for gt_vid in validation_generator:
gt_vid = gt_vid.cuda()
if not args.two_bucket:
# b1 = c2b(gt_vid) # (N,1,H,W)
b1 = torch.mean(gt_vid, dim=1, keepdim=True)
# interm_vid = utils.impulse_inverse(b1, block_size=args.blocksize)
highres_vid = uNet(b1) # (N,16,H,W)
else:
b1, b0 = c2b(gt_vid)
b_stack = torch.cat([b1,b0], dim=1)
highres_vid = uNet(b_stack)
val_psnr_sum += utils.compute_psnr(highres_vid, gt_vid).item()
val_ssim_sum += utils.compute_ssim(highres_vid, gt_vid).item()
## loss
final_loss = utils.weighted_L1loss(highres_vid, gt_vid)
tv_loss = utils.gradx(highres_vid).abs().mean() + utils.grady(highres_vid).abs().mean()
val_loss_sum += (final_loss + 0.1*tv_loss).item()
if val_iter % 1000 == 0:
print('In val iter %d'%(val_iter))
val_iter += 1
logging.info('Total val iterations: %d'%(val_iter))
logging.info('Finished validation with loss: %.4f psnr: %.4f ssim: %.4f'
%(val_loss_sum/val_iter, val_psnr_sum/len(validation_set), val_ssim_sum/len(validation_set)))
scheduler.step(val_loss_sum/val_iter)
uNet.train()
## dump tensorboard summaries
logger.scalar_summary(tag='validation/loss', value=val_loss_sum/val_iter, step=i)
logger.scalar_summary(tag='validation/psnr', value=val_psnr_sum/len(validation_set), step=i)
logger.scalar_summary(tag='validation/ssim', value=val_ssim_sum/len(validation_set), step=i)
## CHECKPOINT
if ((i+1) % 10 == 0) or ((i+1) == (start_epoch+num_epochs)):
utils.save_checkpoint(state={'epoch': i,
'unet_state_dict': uNet.state_dict(),
# 'c2b_state_dict': c2b.state_dict(),
'opt_state_dict': optimizer.state_dict()},
save_path=os.path.join(save_path, 'model'),
filename='model_%.6d.pth'%(i))
logging.info('Saved checkpoint for epoch {}'.format(i))
logger.writer.flush()
logging.info('Finished training') | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dark_tooth_29082.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
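os.environ.setdefault only supplies the settings module when nothing is already exported; a tiny check makes that precedence explicit (the _ci value is made up).
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "dark_tooth_29082.settings_ci"  # hypothetical pre-existing value
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dark_tooth_29082.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])  # -> dark_tooth_29082.settings_ci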
guts/os/os_test.go | // Copyright (c) Tendermint. All rights reserved.
// Use of this source code is governed by Apache License 2.0 that can be
// found in the LICENSE_APACHE_2.0 file.
package os_test
// From: https://github.com/tendermint/tendermint/blob/f28d629e280ddcdc0dd644ccf1586d73dddfb7a1/libs/os/os_test.go
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/require"
gos "github.com/daotl/guts/os"
)
func TestCopyFile(t *testing.T) {
tmpfile, err := os.CreateTemp("", "example")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpfile.Name())
content := []byte("hello world")
if _, err := tmpfile.Write(content); err != nil {
t.Fatal(err)
}
copyfile := fmt.Sprintf("%s.copy", tmpfile.Name())
if err := gos.CopyFile(tmpfile.Name(), copyfile); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(copyfile); os.IsNotExist(err) {
t.Fatal("copy should exist")
}
data, err := os.ReadFile(copyfile)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, content) {
t.Fatalf("copy file content differs: expected %v, got %v", content, data)
}
os.Remove(copyfile)
}
func TestTrapSignal(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip()
}
if os.Getenv("TM_TRAP_SIGNAL_TEST") == "1" {
t.Log("inside test process")
killer()
return
}
cmd, _, mockStderr := newTestProgram(t, "TM_TRAP_SIGNAL_TEST")
err := cmd.Run()
if err == nil {
wantStderr := "exiting"
if mockStderr.String() != wantStderr {
t.Fatalf("stderr: want %q, got %q", wantStderr, mockStderr.String())
}
return
}
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
t.Fatalf("wrong exit code, want 0, got %d", e.ExitCode())
}
t.Fatal("this error should not be triggered")
}
func TestEnsureDir(t *testing.T) {
tmp, err := os.MkdirTemp("", "ensure-dir")
require.NoError(t, err)
defer os.RemoveAll(tmp)
// Should be possible to create a new directory.
err = gos.EnsureDir(filepath.Join(tmp, "dir"), 0755)
require.NoError(t, err)
require.DirExists(t, filepath.Join(tmp, "dir"))
// Should succeed on existing directory.
err = gos.EnsureDir(filepath.Join(tmp, "dir"), 0755)
require.NoError(t, err)
// Should fail on file.
err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644)
require.NoError(t, err)
err = gos.EnsureDir(filepath.Join(tmp, "file"), 0755)
require.Error(t, err)
// Should allow symlink to dir.
err = os.Symlink(filepath.Join(tmp, "dir"), filepath.Join(tmp, "linkdir"))
require.NoError(t, err)
err = gos.EnsureDir(filepath.Join(tmp, "linkdir"), 0755)
require.NoError(t, err)
// Should error on symlink to file.
err = os.Symlink(filepath.Join(tmp, "file"), filepath.Join(tmp, "linkfile"))
require.NoError(t, err)
err = gos.EnsureDir(filepath.Join(tmp, "linkfile"), 0755)
require.Error(t, err)
}
type mockLogger struct{}
func (ml mockLogger) Info(args ...any) {}
func killer() {
logger := mockLogger{}
gos.TrapSignal(logger, func() { _, _ = fmt.Fprintf(os.Stderr, "exiting") })
time.Sleep(1 * time.Second)
p, err := os.FindProcess(os.Getpid())
if err != nil {
panic(err)
}
if err := p.Signal(syscall.SIGTERM); err != nil {
panic(err)
}
time.Sleep(1 * time.Second)
}
func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *bytes.Buffer, stderr *bytes.Buffer) {
t.Helper()
cmd = exec.Command(os.Args[0], "-test.run="+t.Name())
stdout, stderr = bytes.NewBufferString(""), bytes.NewBufferString("")
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", environVar))
cmd.Stdout = stdout
cmd.Stderr = stderr
return
}
// Ensure that using CopyFile does not truncate the destination file before
// the origin is positively a non-directory and that it is ready for copying.
// See https://github.com/tendermint/tendermint/issues/6427
func TestTrickedTruncation(t *testing.T) {
tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate")
if err != nil {
t.Fatal(err)
}
defer os.Remove(tmpDir)
originalWALPath := filepath.Join(tmpDir, "wal")
originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!")
if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil {
t.Fatal(err)
}
// 1. Sanity check.
readWAL, err := os.ReadFile(originalWALPath)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(readWAL, originalWALContent) {
t.Fatalf("Cannot proceed as the content does not match\nGot: %q\nWant: %q", readWAL, originalWALContent)
}
// 2. Now cause the truncation of the original file.
// It is absolutely legal to invoke os.Open on a directory.
if err := gos.CopyFile(tmpDir, originalWALPath); err == nil {
t.Fatal("Expected an error")
}
// 3. Check the WAL's content
reReadWAL, err := os.ReadFile(originalWALPath)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(reReadWAL, originalWALContent) {
t.Fatalf("Oops, the WAL's content was changed :(\nGot: %q\nWant: %q", reReadWAL, originalWALContent)
}
}
| [
"\"TM_TRAP_SIGNAL_TEST\""
]
| []
| [
"TM_TRAP_SIGNAL_TEST"
]
| [] | ["TM_TRAP_SIGNAL_TEST"] | go | 1 | 0 | |
flink-runtime/src/main/java/org/apache/flink/runtime/clusterframework/TaskExecutorResourceUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.clusterframework;
import org.apache.flink.api.common.resources.CPUResource;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.IllegalConfigurationException;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.NettyShuffleEnvironmentOptions;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.util.ConfigurationParserUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* Utility class for TaskExecutor memory configurations.
*
* <p>See {@link TaskExecutorResourceSpec} for details about memory components of TaskExecutor and their relationships.
*/
public class TaskExecutorResourceUtils {
private static final Logger LOG = LoggerFactory.getLogger(TaskExecutorResourceUtils.class);
private TaskExecutorResourceUtils() {}
// ------------------------------------------------------------------------
// Generating JVM Parameters
// ------------------------------------------------------------------------
public static String generateJvmParametersStr(final TaskExecutorResourceSpec taskExecutorResourceSpec) {
final MemorySize jvmHeapSize = taskExecutorResourceSpec.getJvmHeapMemorySize();
final MemorySize jvmDirectSize = taskExecutorResourceSpec.getJvmDirectMemorySize();
final MemorySize jvmMetaspaceSize = taskExecutorResourceSpec.getJvmMetaspaceSize();
return "-Xmx" + jvmHeapSize.getBytes()
+ " -Xms" + jvmHeapSize.getBytes()
+ " -XX:MaxDirectMemorySize=" + jvmDirectSize.getBytes()
+ " -XX:MaxMetaspaceSize=" + jvmMetaspaceSize.getBytes();
}
// ------------------------------------------------------------------------
// Generating Dynamic Config Options
// ------------------------------------------------------------------------
public static String generateDynamicConfigsStr(final TaskExecutorResourceSpec taskExecutorResourceSpec) {
final Map<String, String> configs = new HashMap<>();
configs.put(TaskManagerOptions.CPU_CORES.key(),
String.valueOf(taskExecutorResourceSpec.getCpuCores().getValue().doubleValue()));
configs.put(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY.key(), taskExecutorResourceSpec.getFrameworkHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(), taskExecutorResourceSpec.getFrameworkOffHeapMemorySize().getBytes() + "b");
configs.put(TaskManagerOptions.TASK_HEAP_MEMORY.key(), taskExecutorResourceSpec.getTaskHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key(), taskExecutorResourceSpec.getTaskOffHeapSize().getBytes() + "b");
configs.put(TaskManagerOptions.NETWORK_MEMORY_MIN.key(), taskExecutorResourceSpec.getNetworkMemSize().getBytes() + "b");
configs.put(TaskManagerOptions.NETWORK_MEMORY_MAX.key(), taskExecutorResourceSpec.getNetworkMemSize().getBytes() + "b");
configs.put(TaskManagerOptions.MANAGED_MEMORY_SIZE.key(), taskExecutorResourceSpec.getManagedMemorySize().getBytes() + "b");
return assembleDynamicConfigsStr(configs);
}
private static String assembleDynamicConfigsStr(final Map<String, String> configs) {
final StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : configs.entrySet()) {
sb.append("-D ").append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
return sb.toString();
}
// ------------------------------------------------------------------------
// Generating Slot Resource Profiles
// ------------------------------------------------------------------------
public static List<ResourceProfile> createDefaultWorkerSlotProfiles(
TaskExecutorResourceSpec taskExecutorResourceSpec,
int numberOfSlots) {
final ResourceProfile resourceProfile =
generateDefaultSlotResourceProfile(taskExecutorResourceSpec, numberOfSlots);
return Collections.nCopies(numberOfSlots, resourceProfile);
}
public static ResourceProfile generateDefaultSlotResourceProfile(
TaskExecutorResourceSpec taskExecutorResourceSpec,
int numberOfSlots) {
return ResourceProfile.newBuilder()
.setCpuCores(taskExecutorResourceSpec.getCpuCores().divide(numberOfSlots))
.setTaskHeapMemory(taskExecutorResourceSpec.getTaskHeapSize().divide(numberOfSlots))
.setTaskOffHeapMemory(taskExecutorResourceSpec.getTaskOffHeapSize().divide(numberOfSlots))
.setManagedMemory(taskExecutorResourceSpec.getManagedMemorySize().divide(numberOfSlots))
.setNetworkMemory(taskExecutorResourceSpec.getNetworkMemSize().divide(numberOfSlots))
.build();
}
public static ResourceProfile generateTotalAvailableResourceProfile(TaskExecutorResourceSpec taskExecutorResourceSpec) {
return ResourceProfile.newBuilder()
.setCpuCores(taskExecutorResourceSpec.getCpuCores())
.setTaskHeapMemory(taskExecutorResourceSpec.getTaskHeapSize())
.setTaskOffHeapMemory(taskExecutorResourceSpec.getTaskOffHeapSize())
.setManagedMemory(taskExecutorResourceSpec.getManagedMemorySize())
.setNetworkMemory(taskExecutorResourceSpec.getNetworkMemSize())
.build();
}
// ------------------------------------------------------------------------
// Memory Configuration Calculations
// ------------------------------------------------------------------------
public static TaskExecutorResourceSpecBuilder newResourceSpecBuilder(final Configuration config) {
return TaskExecutorResourceSpecBuilder.newBuilder(config);
}
public static TaskExecutorResourceSpec resourceSpecFromConfig(final Configuration config) {
if (isTaskHeapMemorySizeExplicitlyConfigured(config) && isManagedMemorySizeExplicitlyConfigured(config)) {
// both task heap memory and managed memory are configured, use these to derive total flink memory
return deriveResourceSpecWithExplicitTaskAndManagedMemory(config);
} else if (isTotalFlinkMemorySizeExplicitlyConfigured(config)) {
// either of task heap memory and managed memory is not configured, total flink memory is configured,
// derive from total flink memory
return deriveResourceSpecWithTotalFlinkMemory(config);
} else if (isTotalProcessMemorySizeExplicitlyConfigured(config)) {
// total flink memory is not configured, total process memory is configured,
// derive from total process memory
return deriveResourceSpecWithTotalProcessMemory(config);
} else {
throw new IllegalConfigurationException(String.format("Either Task Heap Memory size (%s) and Managed Memory size (%s), or Total Flink"
+ " Memory size (%s), or Total Process Memory size (%s) need to be configured explicitly.",
TaskManagerOptions.TASK_HEAP_MEMORY.key(),
TaskManagerOptions.MANAGED_MEMORY_SIZE.key(),
TaskManagerOptions.TOTAL_FLINK_MEMORY.key(),
TaskManagerOptions.TOTAL_PROCESS_MEMORY.key()));
}
}
public static boolean isTaskExecutorResourceExplicitlyConfigured(final Configuration config) {
return (isTaskHeapMemorySizeExplicitlyConfigured(config) && isManagedMemorySizeExplicitlyConfigured(config))
|| isTotalFlinkMemorySizeExplicitlyConfigured(config)
|| isTotalProcessMemorySizeExplicitlyConfigured(config);
}
private static TaskExecutorResourceSpec deriveResourceSpecWithExplicitTaskAndManagedMemory(final Configuration config) {
// derive flink internal memory from explicitly configure task heap memory size and managed memory size
final MemorySize taskHeapMemorySize = getTaskHeapMemorySize(config);
final MemorySize managedMemorySize = getManagedMemorySize(config);
final MemorySize frameworkHeapMemorySize = getFrameworkHeapMemorySize(config);
final MemorySize frameworkOffHeapMemorySize = getFrameworkOffHeapMemorySize(config);
final MemorySize taskOffHeapMemorySize = getTaskOffHeapMemorySize(config);
final MemorySize networkMemorySize;
final MemorySize totalFlinkExcludeNetworkMemorySize =
frameworkHeapMemorySize.add(frameworkOffHeapMemorySize).add(taskHeapMemorySize).add(taskOffHeapMemorySize).add(managedMemorySize);
if (isTotalFlinkMemorySizeExplicitlyConfigured(config)) {
// derive network memory from total flink memory, and check against network min/max
final MemorySize totalFlinkMemorySize = getTotalFlinkMemorySize(config);
if (totalFlinkExcludeNetworkMemorySize.getBytes() > totalFlinkMemorySize.getBytes()) {
throw new IllegalConfigurationException(
"Sum of configured Framework Heap Memory (" + frameworkHeapMemorySize.toString()
+ "), Framework Off-Heap Memory (" + frameworkOffHeapMemorySize.toString()
+ "), Task Heap Memory (" + taskHeapMemorySize.toString()
+ "), Task Off-Heap Memory (" + taskOffHeapMemorySize.toString()
+ ") and Managed Memory (" + managedMemorySize.toString()
+ ") exceed configured Total Flink Memory (" + totalFlinkMemorySize.toString() + ").");
}
networkMemorySize = totalFlinkMemorySize.subtract(totalFlinkExcludeNetworkMemorySize);
sanityCheckNetworkMemoryWithExplicitlySetTotalFlinkAndHeapMemory(config, networkMemorySize, totalFlinkMemorySize);
} else {
// derive network memory from network configs
if (isUsingLegacyNetworkConfigs(config)) {
networkMemorySize = getNetworkMemorySizeWithLegacyConfig(config);
} else {
networkMemorySize = deriveNetworkMemoryWithInverseFraction(config, totalFlinkExcludeNetworkMemorySize);
}
}
final FlinkInternalMemory flinkInternalMemory = new FlinkInternalMemory(
frameworkHeapMemorySize,
frameworkOffHeapMemorySize,
taskHeapMemorySize,
taskOffHeapMemorySize,
networkMemorySize,
managedMemorySize);
sanityCheckTotalFlinkMemory(config, flinkInternalMemory);
// derive jvm metaspace and overhead
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead = deriveJvmMetaspaceAndOverheadFromTotalFlinkMemory(config, flinkInternalMemory.getTotalFlinkMemorySize());
return createTaskExecutorResourceSpec(config, flinkInternalMemory, jvmMetaspaceAndOverhead);
}
private static TaskExecutorResourceSpec deriveResourceSpecWithTotalFlinkMemory(final Configuration config) {
// derive flink internal memory from explicitly configured total flink memory
final MemorySize totalFlinkMemorySize = getTotalFlinkMemorySize(config);
final FlinkInternalMemory flinkInternalMemory = deriveInternalMemoryFromTotalFlinkMemory(config, totalFlinkMemorySize);
// derive jvm metaspace and overhead
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead = deriveJvmMetaspaceAndOverheadFromTotalFlinkMemory(config, totalFlinkMemorySize);
return createTaskExecutorResourceSpec(config, flinkInternalMemory, jvmMetaspaceAndOverhead);
}
private static TaskExecutorResourceSpec deriveResourceSpecWithTotalProcessMemory(final Configuration config) {
// derive total flink memory from explicitly configured total process memory size
final MemorySize totalProcessMemorySize = getTotalProcessMemorySize(config);
final MemorySize jvmMetaspaceSize = getJvmMetaspaceSize(config);
final MemorySize jvmOverheadSize = deriveJvmOverheadWithFraction(config, totalProcessMemorySize);
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead = new JvmMetaspaceAndOverhead(jvmMetaspaceSize, jvmOverheadSize);
if (jvmMetaspaceAndOverhead.getTotalJvmMetaspaceAndOverheadSize().getBytes() > totalProcessMemorySize.getBytes()) {
throw new IllegalConfigurationException(
"Sum of configured JVM Metaspace (" + jvmMetaspaceAndOverhead.metaspace.toString()
+ ") and JVM Overhead (" + jvmMetaspaceAndOverhead.overhead.toString()
+ ") exceed configured Total Process Memory (" + totalProcessMemorySize.toString() + ").");
}
final MemorySize totalFlinkMemorySize = totalProcessMemorySize.subtract(jvmMetaspaceAndOverhead.getTotalJvmMetaspaceAndOverheadSize());
// derive flink internal memory
final FlinkInternalMemory flinkInternalMemory = deriveInternalMemoryFromTotalFlinkMemory(config, totalFlinkMemorySize);
return createTaskExecutorResourceSpec(config, flinkInternalMemory, jvmMetaspaceAndOverhead);
}
private static JvmMetaspaceAndOverhead deriveJvmMetaspaceAndOverheadFromTotalFlinkMemory(
final Configuration config,
final MemorySize totalFlinkMemorySize) {
final MemorySize jvmMetaspaceSize = getJvmMetaspaceSize(config);
final MemorySize totalFlinkAndJvmMetaspaceSize = totalFlinkMemorySize.add(jvmMetaspaceSize);
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead;
if (isTotalProcessMemorySizeExplicitlyConfigured(config)) {
final MemorySize totalProcessMemorySize = getTotalProcessMemorySize(config);
final MemorySize jvmOverheadSize = totalProcessMemorySize.subtract(totalFlinkAndJvmMetaspaceSize);
sanityCheckJvmOverhead(config, jvmOverheadSize, totalProcessMemorySize);
jvmMetaspaceAndOverhead = new JvmMetaspaceAndOverhead(jvmMetaspaceSize, jvmOverheadSize);
} else {
final MemorySize jvmOverheadSize = deriveJvmOverheadWithInverseFraction(config, totalFlinkAndJvmMetaspaceSize);
jvmMetaspaceAndOverhead = new JvmMetaspaceAndOverhead(jvmMetaspaceSize, jvmOverheadSize);
sanityCheckTotalProcessMemory(config, totalFlinkMemorySize, jvmMetaspaceAndOverhead);
}
return jvmMetaspaceAndOverhead;
}
private static FlinkInternalMemory deriveInternalMemoryFromTotalFlinkMemory(
final Configuration config,
final MemorySize totalFlinkMemorySize) {
final MemorySize frameworkHeapMemorySize = getFrameworkHeapMemorySize(config);
final MemorySize frameworkOffHeapMemorySize = getFrameworkOffHeapMemorySize(config);
final MemorySize taskOffHeapMemorySize = getTaskOffHeapMemorySize(config);
final MemorySize taskHeapMemorySize;
final MemorySize networkMemorySize;
final MemorySize managedMemorySize;
if (isTaskHeapMemorySizeExplicitlyConfigured(config)) {
// task heap memory is configured,
// derive managed memory first, leave the remaining to network memory and check against network min/max
taskHeapMemorySize = getTaskHeapMemorySize(config);
managedMemorySize = deriveManagedMemoryAbsoluteOrWithFraction(config, totalFlinkMemorySize);
final MemorySize totalFlinkExcludeNetworkMemorySize =
frameworkHeapMemorySize.add(frameworkOffHeapMemorySize).add(taskHeapMemorySize).add(taskOffHeapMemorySize).add(managedMemorySize);
if (totalFlinkExcludeNetworkMemorySize.getBytes() > totalFlinkMemorySize.getBytes()) {
throw new IllegalConfigurationException(
"Sum of configured Framework Heap Memory (" + frameworkHeapMemorySize.toString()
+ "), Framework Off-Heap Memory (" + frameworkOffHeapMemorySize.toString()
+ "), Task Heap Memory (" + taskHeapMemorySize.toString()
+ "), Task Off-Heap Memory (" + taskOffHeapMemorySize.toString()
+ ") and Managed Memory (" + managedMemorySize.toString()
+ ") exceed configured Total Flink Memory (" + totalFlinkMemorySize.toString() + ").");
}
networkMemorySize = totalFlinkMemorySize.subtract(totalFlinkExcludeNetworkMemorySize);
sanityCheckNetworkMemoryWithExplicitlySetTotalFlinkAndHeapMemory(config, networkMemorySize, totalFlinkMemorySize);
} else {
// task heap memory is not configured
// derive managed memory and network memory, leave the remaining to task heap memory
managedMemorySize = deriveManagedMemoryAbsoluteOrWithFraction(config, totalFlinkMemorySize);
if (isUsingLegacyNetworkConfigs(config)) {
networkMemorySize = getNetworkMemorySizeWithLegacyConfig(config);
} else {
networkMemorySize = deriveNetworkMemoryWithFraction(config, totalFlinkMemorySize);
}
final MemorySize totalFlinkExcludeTaskHeapMemorySize =
frameworkHeapMemorySize.add(frameworkOffHeapMemorySize).add(taskOffHeapMemorySize).add(managedMemorySize).add(networkMemorySize);
if (totalFlinkExcludeTaskHeapMemorySize.getBytes() > totalFlinkMemorySize.getBytes()) {
throw new IllegalConfigurationException(
"Sum of configured Framework Heap Memory (" + frameworkHeapMemorySize.toString()
+ "), Framework Off-Heap Memory (" + frameworkOffHeapMemorySize.toString()
+ "), Task Off-Heap Memory (" + taskOffHeapMemorySize.toString()
+ "), Managed Memory (" + managedMemorySize.toString()
+ ") and Network Memory (" + networkMemorySize.toString()
+ ") exceed configured Total Flink Memory (" + totalFlinkMemorySize.toString() + ").");
}
taskHeapMemorySize = totalFlinkMemorySize.subtract(totalFlinkExcludeTaskHeapMemorySize);
}
final FlinkInternalMemory flinkInternalMemory = new FlinkInternalMemory(
frameworkHeapMemorySize,
frameworkOffHeapMemorySize,
taskHeapMemorySize,
taskOffHeapMemorySize,
networkMemorySize,
managedMemorySize);
sanityCheckTotalFlinkMemory(config, flinkInternalMemory);
return flinkInternalMemory;
}
private static MemorySize deriveManagedMemoryAbsoluteOrWithFraction(final Configuration config, final MemorySize base) {
if (isManagedMemorySizeExplicitlyConfigured(config)) {
return getManagedMemorySize(config);
} else {
return deriveWithFraction("managed memory", base, getManagedMemoryRangeFraction(config));
}
}
private static MemorySize deriveNetworkMemoryWithFraction(final Configuration config, final MemorySize base) {
return deriveWithFraction("network memory", base, getNetworkMemoryRangeFraction(config));
}
private static MemorySize deriveNetworkMemoryWithInverseFraction(final Configuration config, final MemorySize base) {
return deriveWithInverseFraction("network memory", base, getNetworkMemoryRangeFraction(config));
}
private static MemorySize deriveJvmOverheadWithFraction(final Configuration config, final MemorySize base) {
return deriveWithFraction("jvm overhead memory", base, getJvmOverheadRangeFraction(config));
}
private static MemorySize deriveJvmOverheadWithInverseFraction(final Configuration config, final MemorySize base) {
return deriveWithInverseFraction("jvm overhead memory", base, getJvmOverheadRangeFraction(config));
}
private static MemorySize deriveWithFraction(
final String memoryDescription,
final MemorySize base,
final RangeFraction rangeFraction) {
final long relative = (long) (rangeFraction.fraction * base.getBytes());
return new MemorySize(capToMinMax(memoryDescription, relative, rangeFraction));
}
private static MemorySize deriveWithInverseFraction(
final String memoryDescription,
final MemorySize base,
final RangeFraction rangeFraction) {
checkArgument(rangeFraction.fraction < 1);
final long relative = (long) (rangeFraction.fraction / (1 - rangeFraction.fraction) * base.getBytes());
return new MemorySize(capToMinMax(memoryDescription, relative, rangeFraction));
}
private static long capToMinMax(
final String memoryDescription,
final long relative,
final RangeFraction rangeFraction) {
long size = relative;
if (size > rangeFraction.maxSize.getBytes()) {
LOG.info(
"The derived from fraction {} ({}b) is greater than its max value {}, max value will be used instead",
memoryDescription,
relative,
rangeFraction.maxSize);
size = rangeFraction.maxSize.getBytes();
} else if (size < rangeFraction.minSize.getBytes()) {
LOG.info(
"The derived from fraction {} ({}b) is less than its min value {}, max value will be used instead",
memoryDescription,
relative,
rangeFraction.minSize);
size = rangeFraction.minSize.getBytes();
}
return size;
}
private static MemorySize getFrameworkHeapMemorySize(final Configuration config) {
return getMemorySizeFromConfig(config, TaskManagerOptions.FRAMEWORK_HEAP_MEMORY);
}
private static MemorySize getFrameworkOffHeapMemorySize(final Configuration config) {
return getMemorySizeFromConfig(config, TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY);
}
private static MemorySize getTaskHeapMemorySize(final Configuration config) {
checkArgument(isTaskHeapMemorySizeExplicitlyConfigured(config));
return getMemorySizeFromConfig(config, TaskManagerOptions.TASK_HEAP_MEMORY);
}
private static MemorySize getTaskOffHeapMemorySize(final Configuration config) {
return getMemorySizeFromConfig(config, TaskManagerOptions.TASK_OFF_HEAP_MEMORY);
}
private static MemorySize getManagedMemorySize(final Configuration config) {
checkArgument(isManagedMemorySizeExplicitlyConfigured(config));
return getMemorySizeFromConfig(config, TaskManagerOptions.MANAGED_MEMORY_SIZE);
}
private static RangeFraction getManagedMemoryRangeFraction(final Configuration config) {
return getRangeFraction(MemorySize.ZERO, MemorySize.MAX_VALUE, TaskManagerOptions.MANAGED_MEMORY_FRACTION, config);
}
private static MemorySize getNetworkMemorySizeWithLegacyConfig(final Configuration config) {
checkArgument(isUsingLegacyNetworkConfigs(config));
@SuppressWarnings("deprecation")
final long numOfBuffers = config.getInteger(NettyShuffleEnvironmentOptions.NETWORK_NUM_BUFFERS);
final long pageSize = ConfigurationParserUtils.getPageSize(config);
return new MemorySize(numOfBuffers * pageSize);
}
private static RangeFraction getNetworkMemoryRangeFraction(final Configuration config) {
final MemorySize minSize = getMemorySizeFromConfig(config, TaskManagerOptions.NETWORK_MEMORY_MIN);
final MemorySize maxSize = getMemorySizeFromConfig(config, TaskManagerOptions.NETWORK_MEMORY_MAX);
return getRangeFraction(minSize, maxSize, TaskManagerOptions.NETWORK_MEMORY_FRACTION, config);
}
private static MemorySize getJvmMetaspaceSize(final Configuration config) {
return getMemorySizeFromConfig(config, TaskManagerOptions.JVM_METASPACE);
}
private static RangeFraction getJvmOverheadRangeFraction(final Configuration config) {
final MemorySize minSize = getMemorySizeFromConfig(config, TaskManagerOptions.JVM_OVERHEAD_MIN);
final MemorySize maxSize = getMemorySizeFromConfig(config, TaskManagerOptions.JVM_OVERHEAD_MAX);
return getRangeFraction(minSize, maxSize, TaskManagerOptions.JVM_OVERHEAD_FRACTION, config);
}
private static RangeFraction getRangeFraction(
final MemorySize minSize,
final MemorySize maxSize,
ConfigOption<Float> fractionOption,
final Configuration config) {
final double fraction = config.getFloat(fractionOption);
try {
return new RangeFraction(minSize, maxSize, fraction);
} catch (IllegalArgumentException e) {
throw new IllegalConfigurationException(
String.format(
"Inconsistently configured %s (%s) and its min (%s), max (%s) value",
fractionOption,
fraction,
minSize,
maxSize),
e);
}
}
private static MemorySize getTotalFlinkMemorySize(final Configuration config) {
checkArgument(isTotalFlinkMemorySizeExplicitlyConfigured(config));
return getMemorySizeFromConfig(config, TaskManagerOptions.TOTAL_FLINK_MEMORY);
}
private static MemorySize getTotalProcessMemorySize(final Configuration config) {
checkArgument(isTotalProcessMemorySizeExplicitlyConfigured(config));
return getMemorySizeFromConfig(config, TaskManagerOptions.TOTAL_PROCESS_MEMORY);
}
private static MemorySize getMemorySizeFromConfig(final Configuration config, final ConfigOption<MemorySize> option) {
try {
return config.get(option);
} catch (Throwable t) {
throw new IllegalConfigurationException("Cannot read memory size from config option '" + option.key() + "'.", t);
}
}
private static boolean isTaskHeapMemorySizeExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.TASK_HEAP_MEMORY);
}
public static boolean isManagedMemorySizeExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.MANAGED_MEMORY_SIZE);
}
private static boolean isUsingLegacyNetworkConfigs(final Configuration config) {
// use the legacy number-of-buffer config option only when it is explicitly configured and
// none of new config options is explicitly configured
@SuppressWarnings("deprecation")
final boolean legacyConfigured = config.contains(NettyShuffleEnvironmentOptions.NETWORK_NUM_BUFFERS);
return !config.contains(TaskManagerOptions.NETWORK_MEMORY_MIN) &&
!config.contains(TaskManagerOptions.NETWORK_MEMORY_MAX) &&
!config.contains(TaskManagerOptions.NETWORK_MEMORY_FRACTION) &&
legacyConfigured;
}
private static boolean isNetworkMemoryFractionExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.NETWORK_MEMORY_FRACTION);
}
public static boolean isNetworkMemoryExplicitlyConfigured(final Configuration config) {
@SuppressWarnings("deprecation")
final boolean legacyConfigured = config.contains(NettyShuffleEnvironmentOptions.NETWORK_NUM_BUFFERS);
return config.contains(TaskManagerOptions.NETWORK_MEMORY_MAX) ||
config.contains(TaskManagerOptions.NETWORK_MEMORY_MIN) ||
config.contains(TaskManagerOptions.NETWORK_MEMORY_FRACTION) ||
legacyConfigured;
}
private static boolean isJvmOverheadFractionExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.JVM_OVERHEAD_FRACTION);
}
private static boolean isTotalFlinkMemorySizeExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.TOTAL_FLINK_MEMORY);
}
private static boolean isTotalProcessMemorySizeExplicitlyConfigured(final Configuration config) {
return config.contains(TaskManagerOptions.TOTAL_PROCESS_MEMORY);
}
private static void sanityCheckTotalFlinkMemory(final Configuration config, final FlinkInternalMemory flinkInternalMemory) {
if (isTotalFlinkMemorySizeExplicitlyConfigured(config)) {
final MemorySize configuredTotalFlinkMemorySize = getTotalFlinkMemorySize(config);
if (!configuredTotalFlinkMemorySize.equals(flinkInternalMemory.getTotalFlinkMemorySize())) {
throw new IllegalConfigurationException(
"Configured/Derived Flink internal memory sizes (total " + flinkInternalMemory.getTotalFlinkMemorySize().toString()
+ ") do not add up to the configured Total Flink Memory size (" + configuredTotalFlinkMemorySize.toString()
+ "). Configured/Derived Flink internal memory sizes are: "
+ "Framework Heap Memory (" + flinkInternalMemory.frameworkHeap.toString()
+ "), Framework Off-Heap Memory (" + flinkInternalMemory.frameworkOffHeap.toString()
+ "), Task Heap Memory (" + flinkInternalMemory.taskHeap.toString()
+ "), Task Off-Heap Memory (" + flinkInternalMemory.taskOffHeap.toString()
+ "), Network Memory (" + flinkInternalMemory.network.toString()
+ "), Managed Memory (" + flinkInternalMemory.managed.toString() + ").");
}
}
}
private static void sanityCheckTotalProcessMemory(
final Configuration config,
final MemorySize totalFlinkMemory,
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead) {
final MemorySize derivedTotalProcessMemorySize =
totalFlinkMemory.add(jvmMetaspaceAndOverhead.metaspace).add(jvmMetaspaceAndOverhead.overhead);
if (isTotalProcessMemorySizeExplicitlyConfigured(config)) {
final MemorySize configuredTotalProcessMemorySize = getTotalProcessMemorySize(config);
if (!configuredTotalProcessMemorySize.equals(derivedTotalProcessMemorySize)) {
throw new IllegalConfigurationException(
"Configured/Derived memory sizes (total " + derivedTotalProcessMemorySize.toString()
+ ") do not add up to the configured Total Process Memory size (" + configuredTotalProcessMemorySize.toString()
+ "). Configured/Derived memory sizes are: "
+ "Total Flink Memory (" + totalFlinkMemory.toString()
+ "), JVM Metaspace (" + jvmMetaspaceAndOverhead.metaspace.toString()
+ "), JVM Overhead (" + jvmMetaspaceAndOverhead.overhead.toString() + ").");
}
}
}
private static void sanityCheckNetworkMemoryWithExplicitlySetTotalFlinkAndHeapMemory(
final Configuration config,
final MemorySize derivedNetworkMemorySize,
final MemorySize totalFlinkMemorySize) {
try {
sanityCheckNetworkMemory(config, derivedNetworkMemorySize, totalFlinkMemorySize);
} catch (IllegalConfigurationException e) {
throw new IllegalConfigurationException(
"If Total Flink, Task Heap and (or) Managed Memory sizes are explicitly configured then " +
"the Network Memory size is the rest of the Total Flink memory after subtracting all other " +
"configured types of memory, but the derived Network Memory is inconsistent with its configuration.",
e);
}
}
private static void sanityCheckNetworkMemory(
final Configuration config,
final MemorySize derivedNetworkMemorySize,
final MemorySize totalFlinkMemorySize) {
if (isUsingLegacyNetworkConfigs(config)) {
final MemorySize configuredNetworkMemorySize = getNetworkMemorySizeWithLegacyConfig(config);
if (!configuredNetworkMemorySize.equals(derivedNetworkMemorySize)) {
throw new IllegalConfigurationException(
"Derived Network Memory size (" + derivedNetworkMemorySize.toString()
+ ") does not match configured Network Memory size (" + configuredNetworkMemorySize.toString() + ").");
}
} else {
final RangeFraction networkRangeFraction = getNetworkMemoryRangeFraction(config);
if (derivedNetworkMemorySize.getBytes() > networkRangeFraction.maxSize.getBytes() ||
derivedNetworkMemorySize.getBytes() < networkRangeFraction.minSize.getBytes()) {
throw new IllegalConfigurationException("Derived Network Memory size ("
+ derivedNetworkMemorySize.toString() + ") is not in configured Network Memory range ["
+ networkRangeFraction.minSize.toString() + ", "
+ networkRangeFraction.maxSize.toString() + "].");
}
if (isNetworkMemoryFractionExplicitlyConfigured(config) &&
!derivedNetworkMemorySize.equals(totalFlinkMemorySize.multiply(networkRangeFraction.fraction))) {
LOG.info(
"The derived Network Memory size ({}) does not match " +
"the configured Network Memory fraction ({}) from the configured Total Flink Memory size ({}). " +
"The derived Network Memory size will be used.",
derivedNetworkMemorySize,
networkRangeFraction.fraction,
totalFlinkMemorySize);
}
}
}
private static void sanityCheckJvmOverhead(
final Configuration config,
final MemorySize derivedJvmOverheadSize,
final MemorySize totalProcessMemorySize) {
final RangeFraction jvmOverheadRangeFraction = getJvmOverheadRangeFraction(config);
if (derivedJvmOverheadSize.getBytes() > jvmOverheadRangeFraction.maxSize.getBytes() ||
derivedJvmOverheadSize.getBytes() < jvmOverheadRangeFraction.minSize.getBytes()) {
throw new IllegalConfigurationException("Derived JVM Overhead size ("
+ derivedJvmOverheadSize.toString() + ") is not in configured JVM Overhead range ["
+ jvmOverheadRangeFraction.minSize.toString() + ", "
+ jvmOverheadRangeFraction.maxSize.toString() + "].");
}
if (isJvmOverheadFractionExplicitlyConfigured(config) &&
!derivedJvmOverheadSize.equals(totalProcessMemorySize.multiply(jvmOverheadRangeFraction.fraction))) {
LOG.info(
"The derived JVM Overhead size ({}) does not match " +
"the configured JVM Overhead fraction ({}) from the configured Total Process Memory size ({}). " +
"The derived JVM OVerhead size will be used.",
derivedJvmOverheadSize,
jvmOverheadRangeFraction.fraction,
totalProcessMemorySize);
}
}
public static CPUResource getCpuCoresWithFallback(final Configuration config, double fallback) {
return getCpuCores(config, fallback);
}
private static CPUResource getCpuCores(final Configuration config) {
return getCpuCores(config, -1.0);
}
private static CPUResource getCpuCores(final Configuration config, double fallback) {
final double cpuCores;
if (config.contains(TaskManagerOptions.CPU_CORES)) {
cpuCores = config.getDouble(TaskManagerOptions.CPU_CORES);
} else if (fallback > 0.0) {
cpuCores = fallback;
} else {
cpuCores = config.getInteger(TaskManagerOptions.NUM_TASK_SLOTS);
}
if (cpuCores <= 0) {
throw new IllegalConfigurationException(
String.format(
"TaskExecutors need to be started with a positive number of CPU cores. Please configure %s accordingly.",
TaskManagerOptions.CPU_CORES.key()));
}
return new CPUResource(cpuCores);
}
private static TaskExecutorResourceSpec createTaskExecutorResourceSpec(
final Configuration config,
final FlinkInternalMemory flinkInternalMemory,
final JvmMetaspaceAndOverhead jvmMetaspaceAndOverhead) {
return new TaskExecutorResourceSpec(
getCpuCores(config),
flinkInternalMemory.frameworkHeap,
flinkInternalMemory.frameworkOffHeap,
flinkInternalMemory.taskHeap,
flinkInternalMemory.taskOffHeap,
flinkInternalMemory.network,
flinkInternalMemory.managed,
jvmMetaspaceAndOverhead.metaspace,
jvmMetaspaceAndOverhead.overhead);
}
private static class RangeFraction {
final MemorySize minSize;
final MemorySize maxSize;
final double fraction;
RangeFraction(final MemorySize minSize, final MemorySize maxSize, final double fraction) {
this.minSize = minSize;
this.maxSize = maxSize;
this.fraction = fraction;
checkArgument(minSize.getBytes() <= maxSize.getBytes(), "min value must be less or equal to max value");
checkArgument(fraction >= 0 && fraction < 1, "fraction must be in range [0, 1)");
}
}
private static class FlinkInternalMemory {
final MemorySize frameworkHeap;
final MemorySize frameworkOffHeap;
final MemorySize taskHeap;
final MemorySize taskOffHeap;
final MemorySize network;
final MemorySize managed;
FlinkInternalMemory(
final MemorySize frameworkHeap,
final MemorySize frameworkOffHeap,
final MemorySize taskHeap,
final MemorySize taskOffHeap,
final MemorySize network,
final MemorySize managed) {
this.frameworkHeap = checkNotNull(frameworkHeap);
this.frameworkOffHeap = checkNotNull(frameworkOffHeap);
this.taskHeap = checkNotNull(taskHeap);
this.taskOffHeap = checkNotNull(taskOffHeap);
this.network = checkNotNull(network);
this.managed = checkNotNull(managed);
}
MemorySize getTotalFlinkMemorySize() {
return frameworkHeap.add(frameworkOffHeap).add(taskHeap).add(taskOffHeap).add(network).add(managed);
}
}
private static class JvmMetaspaceAndOverhead {
final MemorySize metaspace;
final MemorySize overhead;
JvmMetaspaceAndOverhead(final MemorySize jvmMetaspace, final MemorySize jvmOverhead) {
this.metaspace = checkNotNull(jvmMetaspace);
this.overhead = checkNotNull(jvmOverhead);
}
MemorySize getTotalJvmMetaspaceAndOverheadSize() {
return metaspace.add(overhead);
}
}
public static Configuration getConfigurationMapLegacyTaskManagerHeapSizeToConfigOption(
final Configuration configuration, ConfigOption<MemorySize> configOption) {
if (configuration.contains(configOption)) {
return configuration;
}
return getLegacyTaskManagerHeapMemoryIfExplicitlyConfigured(configuration).map(legacyHeapSize -> {
final Configuration copiedConfig = new Configuration(configuration);
copiedConfig.set(configOption, legacyHeapSize);
LOG.info(
"'{}' is not specified, use the configured deprecated task manager heap value ({}) for it.",
configOption.key(),
legacyHeapSize);
return copiedConfig;
}).orElse(configuration);
}
@SuppressWarnings("deprecation")
private static Optional<MemorySize> getLegacyTaskManagerHeapMemoryIfExplicitlyConfigured(final Configuration configuration) {
String totalProcessEnv = System.getenv("FLINK_TM_HEAP");
if (totalProcessEnv != null) {
try {
return Optional.of(MemorySize.parse(totalProcessEnv));
} catch (Throwable t) {
throw new IllegalConfigurationException("Cannot read total process memory size from environment variable value "
+ totalProcessEnv + ".", t);
}
}
if (configuration.contains(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY)) {
return Optional.of(getMemorySizeFromConfig(configuration, TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY));
}
if (configuration.contains(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY_MB)) {
final long legacyHeapMemoryMB = configuration.getInteger(TaskManagerOptions.TASK_MANAGER_HEAP_MEMORY_MB);
if (legacyHeapMemoryMB < 0) {
throw new IllegalConfigurationException("Configured total process memory size ("
+ legacyHeapMemoryMB + "MB) must not be less than 0.");
}
return Optional.of(new MemorySize(legacyHeapMemoryMB << 20)); // megabytes to bytes;
}
return Optional.empty();
}
}
| [
"\"FLINK_TM_HEAP\""
]
| []
| [
"FLINK_TM_HEAP"
]
| [] | ["FLINK_TM_HEAP"] | java | 1 | 0 | |
python/ray/serve/tests/test_router.py | """
Unit tests for the router class. Please don't add any tests that involve the
controller or the actual replica wrapper; use mocks if necessary.
"""
import asyncio
import pytest
import ray
from ray.serve.common import RunningReplicaInfo
from ray.serve.router import Query, ReplicaSet, RequestMetadata
from ray._private.test_utils import SignalActor
pytestmark = pytest.mark.asyncio
@pytest.fixture
def ray_instance():
# Note(simon):
# This line should not be turned on on master because it leads to very
# spammy and unhelpful logs in case of a failure in CI.
# To run locally, please use this instead.
# SERVE_LOG_DEBUG=1 pytest -v -s test_api.py
# os.environ["SERVE_LOG_DEBUG"] = "1" <- Do not uncomment this.
ray.init(num_cpus=16)
yield
ray.shutdown()
def mock_task_runner():
@ray.remote(num_cpus=0)
class TaskRunnerMock:
def __init__(self):
self.query = None
self.queries = []
@ray.method(num_returns=2)
async def handle_request(self, request_metadata, *args, **kwargs):
self.query = Query(args, kwargs, request_metadata)
self.queries.append(self.query)
return b"", "DONE"
def get_recent_call(self):
return self.query
def get_all_calls(self):
return self.queries
def clear_calls(self):
self.queries = []
async def reconfigure(self, user_config):
return
return TaskRunnerMock.remote()
@pytest.fixture
def task_runner_mock_actor():
yield mock_task_runner()
async def test_replica_set(ray_instance):
signal = SignalActor.remote()
@ray.remote(num_cpus=0)
class MockWorker:
_num_queries = 0
@ray.method(num_returns=2)
async def handle_request(self, request):
self._num_queries += 1
await signal.wait.remote()
return b"", "DONE"
async def num_queries(self):
return self._num_queries
# We will test a scenario with two replicas in the replica set.
rs = ReplicaSet(
"my_deployment",
asyncio.get_event_loop(),
)
replicas = [
RunningReplicaInfo(
deployment_name="my_deployment",
replica_tag=str(i),
actor_handle=MockWorker.remote(),
max_concurrent_queries=1,
)
for i in range(2)
]
rs.update_running_replicas(replicas)
# Send two queries. They should go through the router but blocked by signal
# actors.
query = Query([], {}, RequestMetadata("request-id", "endpoint"))
first_ref = await rs.assign_replica(query)
second_ref = await rs.assign_replica(query)
# These should be blocked by signal actor.
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get([first_ref, second_ref], timeout=1)
# Each replica should have exactly one inflight query. Let make sure the
# queries arrived there.
for replica in replicas:
while await replica.actor_handle.num_queries.remote() != 1:
await asyncio.sleep(1)
# Let's try to send another query.
third_ref_pending_task = asyncio.get_event_loop().create_task(
rs.assign_replica(query)
)
# We should fail to assign a replica, so this coroutine should still be
# pending after some time.
await asyncio.sleep(0.2)
assert not third_ref_pending_task.done()
# Let's unblock the two replicas
await signal.send.remote()
assert await first_ref == "DONE"
assert await second_ref == "DONE"
# The third request should be unblocked and sent to first replica.
# This means we should be able to get the object ref.
third_ref = await third_ref_pending_task
# Now that we have the object ref, let's get its result.
await signal.send.remote()
assert await third_ref == "DONE"
# Finally, make sure that one of the replica processed the third query.
num_queries_set = {
(await replica.actor_handle.num_queries.remote()) for replica in replicas
}
assert num_queries_set == {2, 1}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| []
| []
| [
"SERVE_LOG_DEBUG"
]
| [] | ["SERVE_LOG_DEBUG"] | python | 1 | 0 | |
src/blade/util.py | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
# Feng chen <[email protected]>
# Yi Wang <[email protected]>
# Chong peng <[email protected]>
# Date: October 20, 2011
"""
This is the util module which provides some helper functions.
"""
from __future__ import absolute_import
from __future__ import print_function
import ast
import errno
import fcntl
import hashlib
import inspect
import json
import os
import signal
import string
import subprocess
import sys
import zipfile
_IN_PY3 = sys.version_info[0] == 3
# In python 2, cPickle is much faster than pickle, but in python 3, pickle is
# reimplemented as a C extension and the standalone cPickle module was removed.
if _IN_PY3:
import pickle # pylint: disable=unused-import
else:
# pyright: reportMissingImports=false
import cPickle as pickle # pylint: disable=import-error, unused-import
def md5sum_bytes(content):
"""Calculate md5sum of a byte string."""
assert isinstance(content, bytes), 'Invalid type %s' % type(content)
m = hashlib.md5()
m.update(content)
return m.hexdigest()
def md5sum_str(content):
"""Calculate md5sum of a string."""
assert isinstance(content, str), 'Invalid type %s' % type(content)
return md5sum_bytes(content.encode('utf-8'))
def md5sum_file(file_name):
"""Calculate md5sum of a file."""
with open(file_name, 'rb') as f:
digest = md5sum_bytes(f.read())
return digest
def md5sum(obj):
"""Calculate md5sum of a string-like object"""
if isinstance(obj, bytes):
return md5sum_bytes(obj)
if isinstance(obj, str):
return md5sum_str(obj)
raise TypeError('Invalid type %s' % type(obj))
def lock_file(filename):
"""lock file."""
try:
fd = os.open(filename, os.O_CREAT | os.O_RDWR)
old_fd_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_fd_flags | fcntl.FD_CLOEXEC)
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd, 0
except IOError as ex_value:
return -1, ex_value.errno
def unlock_file(fd):
"""unlock file."""
try:
fcntl.flock(fd, fcntl.LOCK_UN)
os.close(fd)
except IOError:
pass
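# Hypothetical usage sketch (not part of the original module): take an exclusive,
# non-blocking lock around a critical section; the lock file name is made up.
#   fd, err = lock_file('.blade.lock')
#   if fd == -1:
#       print('another instance holds the lock, errno=%s' % err)
#   else:
#       try:
#           pass  # do the exclusive work here
#       finally:
#           unlock_file(fd)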
def var_to_list(var):
"""Normalize a singlar or list to list."""
if isinstance(var, list):
return var[:]
if var is None:
return []
return [var]
def var_to_list_or_none(var):
"""Similar to var_to_list but keeps the None unchanged"""
if var is None:
return var
return var_to_list(var)
def stable_unique(seq):
"""unique a seq and keep its original order"""
# See http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
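# Illustrative example (not part of the original module): stable_unique keeps the
# first occurrence of each element and preserves input order, e.g.
#   stable_unique([3, 1, 3, 2, 1])  ->  [3, 1, 2]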
def to_string(text):
if text is None:
return text
if isinstance(text, str):
return text
if isinstance(text, bytes):
return text.decode('utf-8')
raise TypeError('Unknown type %s' % type(text))
def get_cwd():
"""get_cwd
os.getcwd() doesn't work because it will follow symbolic links.
os.environ.get('PWD') doesn't work because it won't reflect os.chdir().
So in practice we simply use system('pwd') to get current working directory.
"""
p = subprocess.Popen(['pwd'], stdout=subprocess.PIPE, shell=True)
return to_string(p.communicate()[0].strip())
def find_file_bottom_up(name, from_dir=None):
"""Find the specified file/dir from from_dir bottom up until found or failed.
Returns abspath if found, or empty if failed.
"""
if from_dir is None:
from_dir = get_cwd()
finding_dir = os.path.abspath(from_dir)
while True:
path = os.path.join(finding_dir, name)
if os.path.exists(path):
return path
if finding_dir == '/':
break
finding_dir = os.path.dirname(finding_dir)
return ''
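# Hypothetical usage sketch (not part of the original module): walk up from the
# current directory to locate a workspace marker file; the file name is made up.
#   root_file = find_file_bottom_up('BLADE_ROOT')
#   workspace_dir = os.path.dirname(root_file) if root_file else None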
def path_under_dir(path, dir):
"""Check whether a path is under the dir.
Both path and dir must be normalized, and they must both be relative or both be absolute paths.
"""
return dir == '.' or path == dir or path.startswith(dir) and path[len(dir)] == os.path.sep
def mkdir_p(path):
"""Make directory if it does not exist."""
try:
if not os.path.isdir(path):
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _echo(stdout, stderr):
"""Echo messages to stdout and stderr."""
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
def shell(cmd, env=None):
if isinstance(cmd, list):
cmdline = ' '.join(cmd)
else:
cmdline = cmd
p = subprocess.Popen(cmdline,
env=env,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if p.returncode:
if p.returncode != -signal.SIGINT:
# Error
_echo(stdout, stderr)
else:
# Warnings
_echo(stdout, stderr)
return p.returncode
def run_command(args, **kwargs):
"""Run a command without echo, return returncode, stdout and stderr (always as string)."""
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
if _IN_PY3:
r = subprocess.run(args, universal_newlines=True, **kwargs)
return r.returncode, r.stdout, r.stderr
else:
p = subprocess.Popen(args, universal_newlines=True, **kwargs)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
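# Hypothetical usage sketch (not part of the original module): run a command
# quietly and inspect its text output; the command itself is just an example.
#   returncode, stdout, stderr = run_command(['git', 'rev-parse', 'HEAD'])
#   if returncode != 0:
#       print(stderr)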
def load_scm(build_dir):
revision = url = 'unknown'
path = os.path.join(build_dir, 'scm.json')
if os.path.exists(path):
with open(path) as f:
scm = json.load(f)
revision, url = scm['revision'], scm['url']
return revision, url
def environ_add_path(env, key, path):
"""Add path to PATH link environments, such as PATH, LD_LIBRARY_PATH, etc"""
old = env.get(key)
if old:
env[key] = path + ':' + old
else:
env[key] = path
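# Hypothetical usage sketch (not part of the original module): prepend a directory
# to a PATH-like variable in a copied environment; the directory is made up.
#   env = os.environ.copy()
#   environ_add_path(env, 'PATH', '/opt/toolchain/bin')  # becomes '/opt/toolchain/bin:<old PATH>'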
def cpu_count():
try:
import multiprocessing # pylint: disable=import-outside-toplevel
return multiprocessing.cpu_count()
except ImportError:
return int(os.sysconf('SC_NPROCESSORS_ONLN'))
_TRANS_TABLE = (str if _IN_PY3 else string).maketrans(',-/:.+*', '_______')
def regular_variable_name(name):
"""convert some name to a valid identifier name"""
return name.translate(_TRANS_TABLE)
# Some python 2/3 compatibility helpers.
if _IN_PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def exec_file_content(filename, content, globals, locals):
"""Execute code content as filename"""
# pylint: disable=exec-used
exec(compile(content, filename, 'exec'), globals, locals)
def exec_file(filename, globals, locals):
"""Same as python2's execfile builtin function, but python3 has no execfile"""
# pylint: disable=exec-used
with open(filename, 'rb') as f:
exec_file_content(filename, f.read(), globals, locals)
def eval_file(filepath):
"""Load a value from file.
Safely evaluate an expression node or a string containing a Python literal or container display.
The string or node provided may only consist of the following Python literal structures:
strings, bytes, numbers, tuples, lists, dicts, sets, booleans, and None.
"""
return ast.literal_eval(open(filepath).read())
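# Illustrative example (not part of the original module): eval_file() expects the
# whole file to be a single Python literal, so a file containing
#   {'warning': 'yes', 'optimize': ['-O2']}
# is returned as that dict.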
def source_location(filename):
"""Return source location of current call stack from filename"""
full_filename = filename
lineno = 1
# See https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
while frame:
if frame.f_code.co_filename.endswith(filename):
full_filename = frame.f_code.co_filename
lineno = frame.f_lineno
break
frame = frame.f_back
return '%s:%s' % (full_filename, lineno)
def calling_source_location(skip=0):
"""Return source location of current call stack, skip specified levels (not include itself)."""
skip += 1 # This function itself is excluded.
skipped = 0
frame = inspect.currentframe()
while frame:
if skipped == skip:
return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
frame = frame.f_back
skipped += 1
raise ValueError('Invalid value "%d" for "skip"' % skip)
def parse_command_line(argv):
"""Simple command line parsing.
Options can only be passed in the form of `--name=value`; any other arguments are treated as
normal arguments.
Returns:
tuple(options: dict, args: list)
"""
options = {}
args = []
for arg in argv:
if arg.startswith('--'):
pos = arg.find('=')
if pos < 0:
args.append(arg)
continue
name = arg[2:pos]
value = arg[pos+1:]
options[name] = value
else:
args.append(arg)
return options, args
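# Illustrative example (not part of the original module): only '--name=value'
# tokens become options; a bare '--flag' stays a normal argument.
#   parse_command_line(['--jobs=8', 'build', '--verbose'])
#   ->  ({'jobs': '8'}, ['build', '--verbose'])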
def open_zip_file_for_write(filename, compression_level):
"""Open a zip file for writing with specified compression level."""
compression = zipfile.ZIP_DEFLATED
if sys.version_info.major < 3 or sys.version_info.major == 3 and sys.version_info.minor < 7:
if compression_level == "0":
compression = zipfile.ZIP_STORED
return zipfile.ZipFile(filename, 'w', compression, allowZip64=True)
# pylint: disable=unexpected-keyword-arg
return zipfile.ZipFile(filename, 'w', compression, compresslevel=int(compression_level), allowZip64=True)
| []
| []
| [
"PWD"
]
| [] | ["PWD"] | python | 1 | 0 | |
samples/udp.go | package main
import (
"flag"
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/kidoman/go-steam"
)
func main() {
debug := flag.Bool("debug", false, "debug")
flag.Parse()
if *debug {
log.SetLevel(log.DebugLevel)
}
addr := os.Getenv("ADDR")
if addr == "" {
fmt.Println("Please set ADDR.")
return
}
server, err := steam.Connect(addr)
if err != nil {
panic(err)
}
defer server.Close()
ping, err := server.Ping()
if err != nil {
fmt.Printf("steam: could not ping %v: %v\n", addr, err)
return
}
fmt.Printf("steam: ping to %v: %v\n", addr, ping)
info, err := server.Info()
if err != nil {
fmt.Printf("steam: could not get server info from %v: %v\n", addr, err)
return
}
fmt.Printf("steam: info of %v: %v\n", addr, info)
playersInfo, err := server.PlayersInfo()
if err != nil {
fmt.Printf("steam: could not get players info from %v: %v\n", addr, err)
return
}
if len(playersInfo.Players) > 0 {
fmt.Printf("steam: player infos for %v:\n", addr)
for _, player := range playersInfo.Players {
fmt.Printf("steam: %v %v\n", player.Name, player.Score)
}
}
}
func must(err error) {
if err != nil {
panic(err)
}
}
| [
"\"ADDR\""
]
| []
| [
"ADDR"
]
| [] | ["ADDR"] | go | 1 | 0 | |
src/main/java/frc/vision/AdbBridge.java | package frc.vision;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
* AdbBridge interfaces to an Android Debug Bridge (adb) binary, which is needed to communicate to Android devices over
* USB.
*
* adb binary provided by https://github.com/Spectrum3847/RIOdroid
*/
public class AdbBridge {
Path bin_location_;
public final static Path DEFAULT_LOCATION = Paths.get("/usr/bin/adb");
public AdbBridge() {
Path adb_location;
String env_val = System.getenv("FRC_ADB_LOCATION");
if (env_val == null || "".equals(env_val)) {
adb_location = DEFAULT_LOCATION;
} else {
adb_location = Paths.get(env_val);
}
bin_location_ = adb_location;
}
public AdbBridge(Path location) {
bin_location_ = location;
}
private boolean runCommand(String args) {
Runtime r = Runtime.getRuntime();
String cmd = bin_location_.toString() + " " + args;
try {
Process p = r.exec(cmd);
p.waitFor();
} catch (IOException e) {
System.err.println("AdbBridge: Could not run command " + cmd);
e.printStackTrace();
return false;
} catch (InterruptedException e) {
System.err.println("AdbBridge: Could not run command " + cmd);
e.printStackTrace();
return false;
}
return true;
}
public void start() {
System.out.println("Starting adb");
runCommand("start");
}
public void stop() {
System.out.println("Stopping adb");
runCommand("kill-server");
}
public void restartAdb() {
System.out.println("Restarting adb");
stop();
start();
}
public void portForward(int local_port, int remote_port) {
runCommand("forward tcp:" + local_port + " tcp:" + remote_port);
}
public void reversePortForward(int remote_port, int local_port) {
runCommand("reverse tcp:" + remote_port + " tcp:" + local_port);
}
public void restartApp() {
System.out.println("Restarting app");
runCommand("shell am force-stop com.team254.cheezdroid \\; "
+ "am start com.team254.cheezdroid/com.team254.cheezdroid.VisionTrackerActivity");
}
}
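// Hypothetical usage sketch (not part of the original class): typical wiring from
// robot init code; the port number is made up for the example.
//   AdbBridge adb = new AdbBridge();
//   adb.start();
//   adb.portForward(8254, 8254);  // forward a local port to the Android device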
| [
"\"FRC_ADB_LOCATION\""
]
| []
| [
"FRC_ADB_LOCATION"
]
| [] | ["FRC_ADB_LOCATION"] | java | 1 | 0 | |
project/neuralnetwork.py | #!/usr/bin/env python
"""
Neural Network implementation.
Credit
------
I primarily learned the neural network algorithm from below sources:
[ICL] Imperial College London, Mathematics for Machine Learning Specialization, https://www.coursera.org/specializations/mathematics-machine-learning
[DA] deeplearning.ai, Deep Learning Specialization, https://www.coursera.org/specializations/deep-learning
[IG] Ian Goodfellow, Yoshua Bengio and Aaron Courville, Deep Learning, MIT Press, 2016
bibtex entry for the [IG] above:
@book{Goodfellow-et-al-2016,
title={Deep Learning},
author={Ian Goodfellow and Yoshua Bengio and Aaron Courville},
publisher={MIT Press},
note={\\url{http://www.deeplearningbook.org}},
year={2016}
}
Adam optimization code is based on the below paper:
[DK] Diederik P. Kingma and Jimmy Lei Ba, ADAM: A METHOD FOR STOCHASTIC OPTIMIZATION, https://arxiv.org/abs/1412.6980, 2015
I crafted the code from scratch based on the algorithm that I learned, so please email me if you see an error
in this code
* Model class was inspired by the Keras SequenceModel and Keras layer.
"""
__author__ = "Hide Inada"
__copyright__ = "Copyright 2018, Hide Inada"
__license__ = "The MIT License"
__email__ = "[email protected]"
import sys
import os
import logging
from numba import jit
import math
import numpy as np
from .activationfunction import ActivationFunction as af
from .costfunction import CostFunction as cf
from .optimizer import Optimizer as opt
from .weightpersistence import WeightPersistence as wp
from .weightparameter import WeightParameter as wparam
from .convolve import Convolve as conv
from .convolve import _calculate_target_matrix_dimension
log = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO")) # Change the 2nd arg to INFO to suppress debug logging
@jit(nopython=True)
def forward_prop_affine_transform(a_prev, weight, bias):
"""
Apply affine transformation for forward prop
Parameters
----------
a_prev: ndarray
Previous layer's activation
weight: ndarray
Weight
bias: ndarray
Bias
Returns
-------
z: ndarray
Affine transform
"""
return a_prev.dot(weight) + bias
class LayerType():
"""
Type of layers for neural network
"""
DENSE = 0
CONV = 1
class Layer():
"""
Holds meta-information of a single layer of neural network
"""
def __init__(self, num_units, activation=af.RELU, dropout=1.0):
self.num_units = num_units # number of units on the layer.
self.activation = activation # the activation function for the layer.
self.layer_type = LayerType.DENSE # type of the layer
self.dropout = dropout # Dropout. For now, this is valid for dense layer only.
class ConvLayer(Layer):
def __init__(self, kernel_shape, channels, strides=(1, 1), use_padding=True, activation=af.RELU, flatten=False,
layer_dim=None):
"""
Initialize kernel parameters.
Parameters
----------
kernel_shape: tuple
Shape of kernel specified with a tuple (height, row, number of channels)
strides: tuple
Step size in each axis
use_padding: bool
True if the input matrix should be zero-padded before convolution. This is to keep the output matrix the same size.
False if no padding should be applied before convolution.
flatten: bool
Output a flattened layer.
layer_dim: tuple
Dimension of the layer. This is specified only for the input layer which is the pseudo conv layer.
For other layers, this is calculated from other parameters during init.
"""
self.kernel_shape = kernel_shape
self.channels = channels
self.strides = strides
self.use_padding = use_padding
self.activation = activation
self.layer_type = LayerType.CONV
self.flatten = flatten
self.layer_dim = layer_dim
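# Hypothetical usage sketch (not part of the original module): a 3x3 kernel with
# 16 output channels and stride 2, flattened so a dense layer can follow; all
# numbers are made up.
#   conv = ConvLayer(kernel_shape=(3, 3), channels=16, strides=(2, 2), flatten=True)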
class Model():
"""
Container for holding information for multiple layers
"""
def __init__(self, num_input=0, layer_dim=None):
"""
Initialize the model.
Parameters
----------
num_input: int
Number of elements in each sample
layer_dim: tuple
Shape of each sample, e.g. (64, 64, 3) for RGB.
If both num_input and layer_dim are specified, layer_dim takes precedence.
"""
self.layers = list()
if layer_dim is not None:
self.layers.append(
ConvLayer(layer_dim=layer_dim, kernel_shape=None, channels=layer_dim[2], activation=af.NONE))
else:
self.layers.append(Layer(num_units=num_input, activation=af.NONE))
def add(self, layer):
"""
Add a single layer to the model.
Parameters
----------
layer: Layer
A single layer of the network
"""
self.layers.append(layer)
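# Hypothetical usage sketch (not part of the original module): build a small dense
# model with the container above; the layer sizes are made up.
#   model = Model(num_input=2)
#   model.add(Layer(num_units=8, activation=af.RELU))
#   model.add(Layer(num_units=1, activation=af.SIGMOID))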
class NeuralNetwork():
"""
Neural Network
Variables used in the class
---------------------------
weight: matrix
Weight stored in matrix associated with each layer.
Size of the matrix is unit count of the previous layer multiplied by the unit count of the current layer
bias: vector
Bias stored in matrix associated with each layer.
Size of the vector is the unit count of the current layer
z: matrix
Affine transformation applied to previous layer's activation
z = a.T w. In this code, a is a matrix with each row holding all parameters for a single point.
Therefore, z = a w is used.
a: matrix
Output of an activation function with z as an input. Activation functions include sigmoid and ReLU.
Notes
-----
Layer number starts with 0 with 0 being the input. However, input is not counted as a layer following a convention.
Weight and bias only exist for layers 1 and above.
"""
MODE_FIT = 0
MODE_PREDICT = 1
def _init_weight_forward_prop_data_list(self):
"""
Allocate list for weight, bias, z, a, gradient of weight, gradient of bias.
Allocate matrix and vector as weight and bias for each layer.
Notes
-----
With the exception of a[0] which is used to access input, all others have valid values only with indices
greater than or equal to 1.
"""
def list_with_n_elements(n):
"""
Helper function to generate a list with n elements.
The primary use for this is to instantiate a list with one item as our neural network uses
1-based index for all except for accessing the input as layer 0 activation.
Parameters
----------
n: int
Number of elements
Returns
-------
out: list
list with n elements. All elements are set to None
"""
return [None] * n
self.weight = list_with_n_elements(1)
self.gradient_weight = list_with_n_elements(1)
self.bias = list_with_n_elements(1)
self.gradient_bias = list_with_n_elements(1)
self.z = list_with_n_elements(self.num_layers + 1)
self.a = list_with_n_elements(self.num_layers + 1)
self.dropout_vector = list_with_n_elements(self.num_layers + 1)
self.kernel_parameter = list_with_n_elements(self.num_layers + 1)
self.layer_locked = [False] * (self.num_layers + 1)
# Create a list for holding references to moment vectors for ADAM
if self.optimizer == opt.ADAM:
self.mt_weight = list_with_n_elements(1) # First moment vector for weight
self.mt_bias = list_with_n_elements(1) # First moment vector for bias
self.vt_weight = list_with_n_elements(1) # Second moment vector for weight
self.vt_bias = list_with_n_elements(1) # Second moment vector for bias
# Allocate weight and bias for each layer
for i in range(self.num_layers):
w = None # to suppress a wrong IDE warning
b = None
this_layer = self.model.layers[i + 1]
prev_layer = self.model.layers[i]
if self.model.layers[i + 1].layer_type == LayerType.DENSE:
num_units_this_layer = this_layer.num_units
num_units_prev_layer = prev_layer.num_units
self.dropout_vector[i] = np.ones((num_units_prev_layer))
# w initialization below is following the recommendation on http://cs231n.github.io/neural-networks-2/
# min 100 to ensure that weights are small even when the number of units is small.
if self.weight_parameter is None:
w = np.random.randn(num_units_prev_layer, num_units_this_layer) * 0.1
else:
if self.weight_parameter.init_type == wparam.NORMAL:
w = np.random.normal(self.weight_parameter.mean, self.weight_parameter.stddev,
(num_units_prev_layer,
num_units_this_layer)) * self.weight_parameter.multiplier
elif self.weight_parameter.init_type == wparam.UNIFORM:
w = np.random.uniform(self.weight_parameter.mean, self.weight_parameter.stddev,
(num_units_prev_layer,
num_units_this_layer)) * self.weight_parameter.multiplier
elif self.weight_parameter.init_type == wparam.ZERO:
w = np.zeros((num_units_prev_layer, num_units_this_layer))
elif self.weight_parameter.init_type == wparam.LAYER_UNIT_COUNT_PROPORTIONAL:
w = np.random.randn(num_units_prev_layer, num_units_this_layer) * math.sqrt(
1.0 / num_units_prev_layer) * self.weight_parameter.multiplier
elif self.weight_parameter.init_type == wparam.LAYER_UNIT_COUNT_PROPORTIONAL2:
w = np.random.randn(num_units_prev_layer, num_units_this_layer) * math.sqrt(
2.0 / num_units_prev_layer) * self.weight_parameter.multiplier
# Bias
if self.bias_parameter is None:
b = np.zeros((1, num_units_this_layer))
else:
if self.bias_parameter.init_type == wparam.NORMAL:
b = np.random.normal(self.bias_parameter.mean, self.bias_parameter.stddev,
(1, num_units_this_layer)) * self.bias_parameter.multiplier
elif self.bias_parameter.init_type == wparam.UNIFORM:
b = np.random.uniform(self.bias_parameter.mean, self.bias_parameter.stddev,
(1, num_units_this_layer)) * self.bias_parameter.multiplier
elif self.bias_parameter.init_type == wparam.ZERO:
b = np.zeros((1, num_units_this_layer))
else: # if current layer is conv
if prev_layer.layer_dim is None:
log.error("Fatal error. Dimension of the previous layer is set to None.")
raise ValueError("Fatal error. Dimension of the previous layer is set to None.")
prev_channels = prev_layer.channels
prev_layer_height = prev_layer.layer_dim[0]
prev_layer_width = prev_layer.layer_dim[1]
kernel_shape = this_layer.kernel_shape # 0:height, 1:width
channels = this_layer.channels
strides = this_layer.strides
use_padding = this_layer.use_padding
kernel_height = kernel_shape[0]
kernel_width = kernel_shape[1]
padding_height = (kernel_shape[0] // 2) * 2
padding_width = (kernel_shape[1] // 2) * 2
target_height = (prev_layer_height + padding_height - kernel_height) // strides[0] + 1
target_width = (prev_layer_width + padding_width - kernel_width) // strides[1] + 1
this_layer.layer_dim = (target_height, target_width, channels)
this_layer.num_units = target_height * target_width * channels
if self.weight_parameter is None:
w = np.random.randn(kernel_shape[0], kernel_shape[1], prev_channels, channels) * 0.01
else:
if self.weight_parameter.init_type == wparam.NORMAL:
w = np.random.normal(self.weight_parameter.mean, self.weight_parameter.stddev,
(kernel_shape[0], kernel_shape[1], prev_channels,
channels)) * self.weight_parameter.multiplier
elif self.weight_parameter.init_type == wparam.UNIFORM:
w = np.random.uniform(self.weight_parameter.mean, self.weight_parameter.stddev,
(kernel_shape[0], kernel_shape[1], prev_channels,
channels)) * self.weight_parameter.multiplier
elif self.weight_parameter.init_type == wparam.ZERO:
w = np.zeros((kernel_shape[0], kernel_shape[1], prev_channels, channels))
# Bias
if self.bias_parameter is None:
b = np.zeros((channels))
else:
if self.bias_parameter.init_type == wparam.NORMAL:
b = np.random.normal(self.bias_parameter.mean, self.bias_parameter.stddev,
(channels)) * self.bias_parameter.multiplier
elif self.bias_parameter.init_type == wparam.UNIFORM:
b = np.random.uniform(self.bias_parameter.mean, self.bias_parameter.stddev,
(channels)) * self.bias_parameter.multiplier
elif self.bias_parameter.init_type == wparam.ZERO:
b = np.zeros((channels))
self.weight.append(w)
self.gradient_weight.append(np.zeros(w.shape))
self.bias.append(b)
self.gradient_bias.append(np.zeros(b.shape))
if self.optimizer == opt.ADAM:
self.mt_weight.append(np.zeros(w.shape))
self.mt_bias.append(np.zeros(b.shape))
self.vt_weight.append(np.zeros(w.shape))
self.vt_bias.append(np.zeros(b.shape))
def __init__(self, model, cost_function=cf.MEAN_SQUARED_ERROR, learning_rate=0.001, optimizer=opt.BATCH,
optimizer_settings=None, batch_size=1, use_layer_from=None, weight_parameter=None,
bias_parameter=None):
"""
Initialize the class.
Parameters
----------
model: Model
Model object that holds the layer definitions, including the input layer
learning_rate: float
Controls the speed of gradient descent. At the end of each each epoch,
gradient is multiplied with the learning rate before subtracted from weight.
optimizer: int
Optimizer type
optimizer_settings: object
Optimizer parameters
batch_size: int
Size of the batch
use_layer_from: list
Dictionary containing a list of objects to share one or more layers with in the read-only mode.
Namely, during the backprop of this object, weights on those layers will not be updated.
Example:
use_layer_from=[{"model": nn_discriminator,
"layer_map": [{"from": 3, "to": 1},
{"from": 4, "to": 2}]}],
Use nn_discriminator object's 3rd and 4th layers as the 1st and 2nd layer of this model.
weight_parameter: WeightParameter
Contains parameters to initialize layer weights
bias_parameter: WeightParameter
Contains parameters to initialize layer biases
"""
self.mode = NeuralNetwork.MODE_FIT
self.model = model
self.optimizer = optimizer
self.optimizer_settings = optimizer_settings
self.cost_function = cost_function
self.learning_rate = learning_rate
self._dataset_size = 0 # Dataset size: Size of samples fed to fit(). Dataset size to be initialized in fit()
self.batch_size = batch_size
self.use_layer_from = use_layer_from
self.weight_parameter = weight_parameter
self.bias_parameter = bias_parameter
self.num_layers = len(model.layers) - 1 # To exclude the input layer
self._init_weight_forward_prop_data_list()
def _forward_prop(self, x, output_layer_index=-1):
"""
Forward propagation
Parameters
----------
x: ndarray
Input data
output_layer_index: int
1-based layer index to output. If set to -1, forward prop proceeds to the last layer.
This is used to output the activation of an intermediate layer.
Returns
-------
out: ndarray
Predicted values
"""
a = x # For the first layer, assign input as the activation
self.a[0] = a
if output_layer_index != -1:
loop_count = output_layer_index
else:
loop_count = self.num_layers
for i in range(loop_count):
a = self._forward_prop_one_layer(a, i + 1)
return (a)
# forward prop
def _forward_prop_one_layer(self, a_prev, current_layer_index):
"""
Forward propagate one layer by applying affine transformation and activation
Parameters
----------
a_prev: ndarray
Previous layer's activation
current_layer_index: int
Index of current layer. Index 0 is input.
activation: str
Activation function
"""
this_layer = self.model.layers[current_layer_index]
prev_layer = self.model.layers[current_layer_index-1]
if this_layer.layer_type == LayerType.CONV:
kernel = self.weight[current_layer_index]
bias = self.bias[current_layer_index]
strides = this_layer.strides
use_padding = this_layer.use_padding
z = conv.convolve_tensor_dataset_2(a_prev, kernel, bias, strides=strides, use_padding=use_padding)
else: # Dense layer
# Affine transformation
# z = a_prev.dot(self.weight[current_layer_index]) + self.bias[current_layer_index]
if prev_layer.dropout != 1.0:
if self.mode == NeuralNetwork.MODE_FIT:
num_activation_prev = self.dropout_vector[current_layer_index-1].shape[0]
dropout = prev_layer.dropout
num_units_to_drop = int(num_activation_prev * (1-dropout))
index_of_units_to_drop = np.random.choice(num_activation_prev, num_units_to_drop)
dropout_vector = np.ones((num_activation_prev)) # reset to 1 first
dropout_vector[index_of_units_to_drop] = 0
self.dropout_vector[current_layer_index - 1] = dropout_vector
a_prev_tilda = a_prev * self.dropout_vector[current_layer_index - 1]
else: # if predict, use all nodes but multiply by the dropout
a_prev_tilda = a_prev * prev_layer.dropout
else:
a_prev_tilda = a_prev
z = forward_prop_affine_transform(a_prev_tilda, self.weight[current_layer_index], self.bias[current_layer_index])
self.z[current_layer_index] = z
# Activation
if this_layer.activation == af.SIGMOID:
a = af.sigmoid(z)
elif this_layer.activation == af.RELU:
a = af.relu(z)
elif this_layer.activation == af.LEAKY_RELU:
a = af.leaky_relu(z)
else:
a = af.none(z)
if this_layer.layer_type == LayerType.CONV and this_layer.flatten == True:
a_shape = a.shape
a = a.reshape((a_shape[0], a_shape[1] * a_shape[2] * a_shape[3]))
self.a[current_layer_index] = a
return (a)
def predict(self, x):
"""
Predict based on the input x
Parameters
----------
x: ndarray
Input data
Returns
-------
out: ndarray
Predicted values
"""
self.mode = NeuralNetwork.MODE_PREDICT
return self._forward_prop(x)
def predict_intermediate(self, x, output_layer_index):
"""
Feedforward up to the layer specified.
Parameters
----------
x: ndarray
Input data
output_layer_index: int
1-based layer index to output. If set to -1, forward prop proceeds to the last layer.
This is used to output the activation of an intermediate layer.
Returns
-------
out: ndarray
Predicted values
"""
return self._forward_prop(x, output_layer_index)
def _backprop(self, x, y, y_hat):
"""
Backpropagation
x: ndarray
Input
y: ndarray
Ground-truth
y_hat: ndarray
Predicted values
Notes
-----
Gradient is calculated using the multivariable chain rule.
A variable 'derivative_cumulative' carries this over from the last layer all the way to the first layer.
"""
dj_wrt_a = self.derivative_j_wrt_a(y, y_hat, cost_function=self.cost_function)
derivative_cumulative = dj_wrt_a
for i in range(self.num_layers):
derivative_cumulative = self._backprop_one_layer(derivative_cumulative, self.num_layers - i)
self._update_weight()
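# Reference sketch (not part of the original module): for a dense layer l with
# z_l = a_{l-1} W_l + b_l and a_l = f(z_l), the chain rule applied above gives
#   dJ/dW_l     = a_{l-1}^T (dJ/da_l * f'(z_l)) / batch_size
#   dJ/db_l     = sum_over_batch(dJ/da_l * f'(z_l)) / batch_size
#   dJ/da_{l-1} = (dJ/da_l * f'(z_l)) W_l^T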
def _backprop_one_layer(self, derivative_cumulative, layer_index):
"""
Backpropagate one layer
derivative_cumulative: ndarray
Accumulated derivative from the last layer in the network.
At the entry point of this method, the shape of the array
is the same as the shape of the layer (dataset size by the number of units for the layer).
layer_index: int
Current layer index
Returns
-------
derivative_cumulative: ndarray
Updated accumulated derivative from the last layer
"""
current_batch_size = derivative_cumulative.shape[0]
log.debug("Backprop: Layer index: %d" % (layer_index))
if layer_index <= self.num_layers - 1: # for a 3 layer network, if index <= 2
above_layer = self.model.layers[layer_index + 1]
else:
above_layer = None
this_layer = self.model.layers[layer_index]
prev_layer = self.model.layers[layer_index-1]
if this_layer.layer_type == LayerType.CONV:
if above_layer is None:
raise ValueError("Unexpected value for above layer. Value is None.")
if above_layer.layer_type == LayerType.DENSE:
derivative_cumulative = derivative_cumulative.reshape(
(derivative_cumulative.shape[0], this_layer.layer_dim[0],
this_layer.layer_dim[1],
this_layer.layer_dim[2]
))
# Derivative of a with respect to z
if this_layer.activation == af.SIGMOID:
pa_pz = self.sigmoid_derivative_with_z(layer_index)
elif this_layer.activation == af.RELU:
pa_pz = self.relu_derivative_with_z(layer_index)
elif this_layer.activation == af.LEAKY_RELU:
pa_pz = self.leaky_relu_derivative_with_z(layer_index)
else:
pa_pz = self.none_derivative_with_z(layer_index)
cumulative_derivative_to_z = derivative_cumulative * pa_pz
# Note that the shape is still the same as current layer.
# Derivative of z with respect to weight
if this_layer.layer_type == LayerType.DENSE:
pz_pw = self.partial_z_wrt_partial_w(layer_index)
cumulative_derivative_to_w = pz_pw.T.dot(cumulative_derivative_to_z)
# At this point, shape of cumulative_derivative_to_w is the same as the weight of this layer
cumulative_derivative_to_w /= current_batch_size
self.gradient_weight[layer_index] = cumulative_derivative_to_w
# Derivative of z with respect to bias
pz_pb = self.partial_z_wrt_partial_b(layer_index)
cumulative_derivative_to_b = np.sum(cumulative_derivative_to_z * pz_pb, axis=0)
# At this point, shape of cumulative_derivative_to_b is the same as the bias of this layer
cumulative_derivative_to_b /= current_batch_size
self.gradient_bias[layer_index] = cumulative_derivative_to_b
# Derivative of z with respect to previous layer's activation
pz_pa_prev = self.partial_z_wrt_partial_a_prev(layer_index)
cumulative_derivative_to_a_prev = cumulative_derivative_to_z.dot(pz_pa_prev.T)
if prev_layer.dropout != 1.0:
dropout_vector = self.dropout_vector[layer_index - 1]
cumulative_derivative_to_a_prev *= dropout_vector
else: # if Conv
"""
Refer to my documentation to see how these calculations are derived:
https://hideyukiinada.github.io/cnn_backprop_strides2.html
"""
# Calculate ∂L/∂a_prev
# Step 1. Interweave ∂L/∂z with zeros
# Determine the number of output channels
channels = cumulative_derivative_to_z.shape[3]
# dataset_size = cumulative_derivative_to_z.shape[0]
h = cumulative_derivative_to_z.shape[1]
w = cumulative_derivative_to_z.shape[2]
strides = this_layer.strides[0] # FIXME for non-square matrix
if strides > 1:
# l1 = list()
# for i in range(dataset_size):
# l2 = list()
# for c in range(channels): # shape = (dataset_size, h, w)
# padded = conv.zero_interweave(cumulative_derivative_to_z[i, :, :, c], strides - 1)
# l2.append(padded)
#
# l2np = np.array(l2)
# l2combined = np.concatenate((l2np), axis=2)
# l2stacked = l2combined.reshape((h * 2, w * 2, channels))
# l1.append(l2stacked)
#
# l1np = np.array(l1)
# l1combined = np.concatenate((l1np),axis=0)
# partial_l_partial_z_interweaved = l1combined.reshape((dataset_size, h * 2, w * 2, channels))
partial_l_partial_z_interweaved = conv.zero_interweave_dataset(cumulative_derivative_to_z, strides - 1)
else: # if strides == 1
partial_l_partial_z_interweaved = cumulative_derivative_to_z
# Step 2. Zeropad
# This step is done in convolve_tensor_dataset_back_2()
# Step 3. Flip W vertically and horizontally
weights = self.weight[layer_index]
weights_flipped = conv.flip_weight(weights)
# Convolute partial_l_partial_z_padded * weights_flipped
cumulative_derivative_to_a_prev = conv.convolve_tensor_dataset_back_2(partial_l_partial_z_interweaved,
weights_flipped, use_padding=True)
# Calculate ∂L/∂W
# Step 1. Interweave ∂L/∂z with zeros
# Reuse partial_l_partial_z_interweaved
# Step 2. Zero-pad a_prev
a_prev = self.a[layer_index - 1]
kernel_height = self.gradient_weight[layer_index].shape[0]
kernel_width = self.gradient_weight[layer_index].shape[1]
pad_h = kernel_height // 2
pad_w = kernel_width // 2
# Step 3. Convolve two matrices
cumulative_derivative_to_w = conv.convolve_two_datasets_calc_mean(a_prev,
partial_l_partial_z_interweaved,
use_padding=True, padding=(pad_h, pad_w))
self.gradient_weight[layer_index] = cumulative_derivative_to_w
# Calculate ∂L/∂bias
pz_pb = 1.0
cumulative_derivative_to_b = np.sum(cumulative_derivative_to_z * pz_pb)
cumulative_derivative_to_b /= current_batch_size
self.gradient_bias[layer_index] = cumulative_derivative_to_b
return cumulative_derivative_to_a_prev # Shape is the same as the previous layer's activation.
def _update_weight(self):
"""
Update weight and bias of the network by subtracting the gradient of weight and bias multiplied by the learning
rate.
"""
for i in range(self.num_layers):
layer_index = self.num_layers - i
if self.optimizer == opt.ADAM:
beta1 = self.optimizer_settings.beta1
beta2 = self.optimizer_settings.beta2
beta1_to_t = self.optimizer_settings.beta1_to_t
beta2_to_t = self.optimizer_settings.beta2_to_t
epsilon = self.optimizer_settings.epsilon
self.mt_weight[layer_index] = beta1 * self.mt_weight[layer_index] + \
(1 - beta1) * self.gradient_weight[layer_index]
self.vt_weight[layer_index] = beta2 * self.vt_weight[layer_index] + \
(1 - beta2) * self.gradient_weight[layer_index] ** 2
self.mt_bias[layer_index] = beta1 * self.mt_bias[layer_index] + \
(1 - beta1) * self.gradient_bias[layer_index]
self.vt_bias[layer_index] = beta2 * self.vt_bias[layer_index] + \
(1 - beta2) * self.gradient_bias[layer_index] ** 2
mt_weight_hat = self.mt_weight[layer_index] / (1.0 - beta1_to_t)
vt_weight_hat = self.vt_weight[layer_index] / (1.0 - beta2_to_t)
mt_bias_hat = self.mt_bias[layer_index] / (1.0 - beta1_to_t)
vt_bias_hat = self.vt_bias[layer_index] / (1.0 - beta2_to_t)
if self.layer_locked[layer_index] is False: # Do not update if the layer is borrowed from other model.
self.weight[layer_index] -= self.learning_rate * mt_weight_hat / (
np.sqrt(vt_weight_hat) + epsilon)
self.bias[layer_index] -= self.learning_rate * mt_bias_hat / (
np.sqrt(vt_bias_hat) + epsilon)
else:
if self.layer_locked[layer_index] is False:
self.weight[layer_index] -= self.learning_rate * self.gradient_weight[layer_index]
self.bias[layer_index] -= self.learning_rate * self.gradient_bias[layer_index]
if self.optimizer == opt.ADAM:
self.optimizer_settings.beta1_to_t *= self.optimizer_settings.beta1
self.optimizer_settings.beta2_to_t *= self.optimizer_settings.beta2
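# Reference sketch (not part of the original module): the ADAM branch above follows
# the update rule from the Kingma & Ba paper cited in the module docstring:
#   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
#   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
#   m_hat = m_t / (1 - beta1^t),   v_hat = v_t / (1 - beta2^t)
#   w  <-  w - learning_rate * m_hat / (sqrt(v_hat) + epsilon)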
def fit(self, x, y, epochs, verbose=True, interval=1):
"""
Train the model
Parameters
----------
x: ndarray
Input
y: ndarray
Ground-truth
epochs: int
Number of epochs to iterate
verbose: bool
Show the cost for each epoch
interval: int
Number of epochs to show the cost if verbose is set to true
"""
def process_verbose(y, y_hat, batch_index, batch_loop_count, epoch_index, epoch_size, current_batch_size):
"""
Helper function to output cost at regular intervals
Parameters
----------
y: ndarray
Ground-truth
y_hat: ndarray
Predicted values
batch_index: int
0-based batch index within the current epoch
batch_loop_count: int
Total number of batch loops within a epoch
epoch_index: int
0-based epoch index
epoch_size: int
Number of epochs
current_batch_size : int
Dataset size in this batch
"""
cost = -1
if self.cost_function == cf.CROSS_ENTROPY:
cost = cf.mean_cross_entropy(y, y_hat)
elif self.cost_function == cf.MEAN_SQUARED_ERROR:
cost = cf.mean_squared_error(y, y_hat)
if (self.batch_size >= 32):
print("[Epoch %d/%d - Batch %d/%d] Cost: %.07f. Batch size: %d" %
(epoch_index + 1, epoch_size, batch_index + 1, batch_loop_count, cost, current_batch_size ))
else:
if (batch_index % 100 == 0):
print("[Epoch %d/%d - Batch %d/%d] Cost: %.07f. Batch size: %d" %
(epoch_index + 1, epoch_size, batch_index + 1, batch_loop_count, cost, current_batch_size ))
self._dataset_size = x.shape[0]
self.mode = NeuralNetwork.MODE_FIT
# check to see if we should use layers from other object
if self.use_layer_from is not None:
for other_object in self.use_layer_from:
other_model = other_object["model"]
mappings = other_object["layer_map"]
for mapping in mappings:
source = mapping["from"]
target = mapping["to"]
# print("Using layer %d from other model as this model's layer %d" % (source, target))
self.weight[target] = other_model.weight[source]
self.bias[target] = other_model.bias[source]
self.layer_locked[target] = True
if self.optimizer in [opt.SGD, opt.ADAM]:
if self.optimizer == opt.ADAM:
self.optimizer_settings.beta1_to_t = self.optimizer_settings.beta1
self.optimizer_settings.beta2_to_t = self.optimizer_settings.beta2
for i in range(epochs):
next_k = 0
loop_count = int(self._dataset_size / self.batch_size) # for m = 5, batch_size = 2, this results in [0, 1]
current_batch_size = 0
for j in range(loop_count):
current_batch_size = self.batch_size
k = j * current_batch_size
next_k = k + current_batch_size
x_sub = x[k:next_k]
y_sub = y[k:next_k]
y_hat = self._forward_prop(x_sub)
if verbose:
process_verbose(y_sub, y_hat, j, loop_count, i, epochs, current_batch_size )
self._backprop(x_sub, y_sub, y_hat)
# remainder
last_batch_size = x.shape[0] - next_k
if last_batch_size > 0:
k = next_k
x_sub = x[k:k + last_batch_size ]
y_sub = y[k:k + last_batch_size ]
y_hat = self._forward_prop(x_sub)
if verbose:
process_verbose(y_sub, y_hat, j + 1, loop_count + 1, i, epochs, last_batch_size )
self._backprop(x_sub, y_sub, y_hat)
else: # Batch gradient
current_batch_size = x.shape[0]
for i in range(epochs):
y_hat = self._forward_prop(x)
if verbose:
if self.cost_function == cf.CROSS_ENTROPY:
cost = cf.mean_cross_entropy(y, y_hat)
elif self.cost_function == cf.MEAN_SQUARED_ERROR:
cost = cf.mean_squared_error(y, y_hat)
if ((i + 1) % interval == 0):
print("[%d/%d epochs] Cost: %.07f" % (i + 1, epochs, cost))
self._backprop(x, y, y_hat)
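# Hypothetical usage sketch (not part of the original module): train the model
# built earlier with mini-batch SGD and run inference; the data arrays and
# hyperparameters are made up.
#   nn = NeuralNetwork(model, cost_function=cf.CROSS_ENTROPY, learning_rate=0.01,
#                      optimizer=opt.SGD, batch_size=32)
#   nn.fit(x_train, y_train, epochs=10)
#   y_hat = nn.predict(x_test)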
# Partial derivatives
def derivative_j_wrt_a(self, y, y_hat, cost_function):
"""
Calculate the derivative of cost with respect to y hat (or the activation of the last layer).
Parameters
----------
y: ndarray
Ground-truth
y_hat: ndarray
Predicted values
Returns
-------
out: ndarray
The partial derivative of cost with respect to y hat
Raises
------
ValueError
If unsupported cost function is specified
"""
if cost_function == cf.CROSS_ENTROPY:
d = cf.d_cross_entropy(y, y_hat)
elif cost_function == cf.MEAN_SQUARED_ERROR:
d = cf.d_squared_error(y, y_hat)
else:
raise ValueError("Unsupported cost function")
return (d) # we will multiply by 1/m later
def sigmoid_derivative_with_z(self, layer_index):
"""
Calculate the derivative of activation using the value of z used in forward prop.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of a with respect to z
"""
return af.d_sigmoid(self.z[layer_index])
def relu_derivative_with_z(self, layer_index):
"""
Calculate the derivative of activation using the value of z used in forward prop.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of a with respect to z
"""
return af.d_relu(self.z[layer_index])
def leaky_relu_derivative_with_z(self, layer_index):
"""
Calculate the derivative of activation using the value of z used in forward prop.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of a with respect to z
"""
return af.d_leaky_relu(self.z[layer_index])
def none_derivative_with_z(self, layer_index):
"""
Dummy derivative function to return 1.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of a with respect to z
"""
return af.d_none(self.z[layer_index])
def partial_z_wrt_partial_w(self, current_layer_index):
"""
Calculate the partial derivative of z with respect to weight.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of z with respect to weight
Notes
-----
Since z = w a_prev + b, the partial derivative is a_prev.
"""
a_prev = self.a[current_layer_index - 1]
return a_prev
def partial_z_wrt_partial_a_prev(self, current_layer_index):
"""
Calculate the partial derivative of z with respect to activation of the last layer.
Parameters
----------
layer_index: int
Layer index to be used in retrieving the value of z
Returns
-------
out: ndarray
Partial derivative of z with respect to activation of the last layer
Notes
-----
Since z = w a_prev + b, the partial derivative is w.
"""
w = self.weight[current_layer_index]
return w
def partial_z_wrt_partial_b(self, current_layer_index):
"""
Calculate the partial derivative of z with respect to bias.
Parameters
----------
layer_index: int
Layer index. Not currently used.
Returns
-------
out: ndarray
Partial derivative of z with respect to bias
Notes
-----
Since z = w a_prev + b, the partial derivative is 1.
"""
return 1
# Loading and saving weights
def load(self, file_path):
"""
Load the matrix weight from a file specified with file_path.
Parameters
---------
file_path: Pathlib.path
Path to the file to load the weights from
Returns
-------
Weights of the model
Raises
------
File not found
"""
weight, bias = wp.load(file_path)
self.weight = weight
self.bias = bias
def save(self, file_path):
"""
Save the matrix weight in a file specified by file_path.
Parameters
----------
file_path: Pathlib.path
Path to save the weights
"""
# index corresponds to a layer. layer 0 does not have weight, but wp is aware of this.
wp.save(file_path,
{
"weight": self.weight,
"bias": self.bias
}
)
| []
| []
| [
"LOGLEVEL"
]
| [] | ["LOGLEVEL"] | python | 1 | 0 | |
src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/_params.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import os
import platform
from argcomplete.completers import FilesCompleter
from azure.cli.core.commands import (
CliArgumentType,
register_cli_argument,
register_extra_cli_argument)
from azure.cli.core.commands.parameters import tags_type
from azure.cli.core.commands.validators import validate_file_or_dict
from azure.cli.core.commands.parameters import (
enum_choice_list,
file_type,
resource_group_name_type,
get_one_of_subscription_locations,
get_resource_name_completion_list)
from azure.cli.command_modules.acs._validators import validate_create_parameters, validate_ssh_key, validate_list_of_integers
from azure.cli.command_modules.acs._validators import validate_k8s_client_version
from azure.cli.command_modules.acs._validators import validate_k8s_version
from azure.cli.command_modules.acs._validators import validate_linux_host_name
def _compute_client_factory(**_):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(ResourceType.MGMT_COMPUTE)
def get_vm_sizes(location):
return list(_compute_client_factory().virtual_machine_sizes.list(location))
def get_vm_size_completion_list(prefix, action, parsed_args, **kwargs): # pylint: disable=unused-argument
try:
location = parsed_args.location
except AttributeError:
location = get_one_of_subscription_locations()
result = get_vm_sizes(location)
return [r.name for r in result]
def _get_default_install_location(exe_name):
system = platform.system()
if system == 'Windows':
program_files = os.environ.get('ProgramFiles')
if not program_files:
return None
install_location = '{}\\{}.exe'.format(program_files, exe_name)
elif system == 'Linux' or system == 'Darwin':
install_location = '/usr/local/bin/{}'.format(exe_name)
else:
install_location = None
return install_location
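# Illustrative note (not part of the original module): on Linux or macOS this
# resolves to '/usr/local/bin/kubectl' for exe_name 'kubectl'; on Windows it is
# built from the ProgramFiles environment variable, e.g. 'C:\Program Files\kubectl.exe'.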
def _get_feature_in_preview_message():
return "Feature in preview, only in " + ", ".join(regionsInPreview) + ". "
regionsInPreview = ["ukwest", "uksouth", "westcentralus", "westus2", "canadaeast", "canadacentral", "westindia", "southindia", "centralindia"]
regionsInProd = ["australiasoutheast", "northeurope", "brazilsouth", "australiaeast", "japaneast", "northcentralus", "westus", "eastasia", "eastus2", "southcentralus", "southeastasia", "eastus", "westeurope", "centralus", "japanwest"]
name_arg_type = CliArgumentType(options_list=('--name', '-n'), metavar='NAME')
orchestratorTypes = ["Custom", "DCOS", "Kubernetes", "Swarm", "DockerCE"]
k8s_version_arg_type = CliArgumentType(options_list=('--kubernetes-version', '-k'), metavar='KUBERNETES_VERSION')
storageProfileTypes = ["StorageAccount", "ManagedDisks"]
register_cli_argument('acs', 'tags', tags_type)
register_cli_argument('acs', 'name', arg_type=name_arg_type, configured_default='acs',
help="ACS cluster name. You can configure the default using `az configure --defaults acs=<name>`",
completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
register_cli_argument('acs', 'resource_group', arg_type=resource_group_name_type)
register_cli_argument('acs', 'orchestrator_type', options_list=('--orchestrator-type', '-t'), help='DockerCE - ' + _get_feature_in_preview_message(), **enum_choice_list(orchestratorTypes))
# Some admin names, such as root and admin, are prohibited in ACS. Because we have no control over the orchestrators, default to a safe name.
register_cli_argument('acs', 'admin_username', options_list=('--admin-username',), default='azureuser', required=False)
register_cli_argument('acs', 'api_version', options_list=('--api-version',), required=False, help=_get_feature_in_preview_message() + 'Use API version of ACS to perform az acs operations. Available options: 2017-01-31, 2017-07-01. Default: the latest version for the location')
register_cli_argument('acs', 'dns_name_prefix', options_list=('--dns-prefix', '-d'), help='default use the format of <clustername>-<resourcegroupname>-<subid>, will trim the length and replace sensitive characters if needed')
register_cli_argument('acs', 'container_service_name', options_list=('--name', '-n'), help='The name of the container service', completer=get_resource_name_completion_list('Microsoft.ContainerService/ContainerServices'))
register_cli_argument('acs', 'ssh_key_value', required=False, help='SSH key file value or key file path.', type=file_type, default=os.path.join('~', '.ssh', 'id_rsa.pub'), completer=FilesCompleter())
register_cli_argument('acs create', 'name', arg_type=name_arg_type, validator=validate_ssh_key)
register_extra_cli_argument('acs create', 'generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing', validator=validate_create_parameters)
register_cli_argument('acs create', 'master_profile', options_list=('--master-profile', '-m'), type=validate_file_or_dict, help=_get_feature_in_preview_message() + 'The file or dictionary representation of the master profile. Note it will override any master settings once set')
register_cli_argument('acs create', 'master_vm_size', completer=get_vm_size_completion_list, help=_get_feature_in_preview_message())
register_cli_argument('acs create', 'master_osdisk_size', type=int, help=_get_feature_in_preview_message() + 'The disk size for master pool vms. Unit in GB. Default: corresponding vmsize disk size')
register_cli_argument('acs create', 'master_vnet_subnet_id', type=str, help=_get_feature_in_preview_message() + 'The custom vnet subnet id. Note agent need to used the same vnet if master set. Default: ""')
register_cli_argument('acs create', 'master_first_consecutive_static_ip', type=str, help=_get_feature_in_preview_message() + 'The first consecutive ip used to specify static ip block.')
register_cli_argument('acs create', 'master_storage_profile', help=_get_feature_in_preview_message() + 'Default: varies based on Orchestrator', **enum_choice_list(storageProfileTypes))
register_cli_argument('acs create', 'agent_count', type=int, help='Set default number of agents for the agent pools. Note, for DC/OS clusters you will also get 1 or 2 public agents in addition to these selected masters.')
register_cli_argument('acs create', 'agent_profiles', options_list=('--agent-profiles', '-a'), type=validate_file_or_dict, help=_get_feature_in_preview_message() + 'The file or dictionary representation of the agent profiles. Note it will override any agent settings once set')
register_cli_argument('acs create', 'agent_vm_size', completer=get_vm_size_completion_list, help='Set the default size for agent pools vms.')
register_cli_argument('acs create', 'agent_osdisk_size', type=int, help=_get_feature_in_preview_message() + 'Set the default disk size for agent pools vms. Unit in GB. Default: corresponding vmsize disk size')
register_cli_argument('acs create', 'agent_vnet_subnet_id', type=str, help=_get_feature_in_preview_message() + 'Set the default custom vnet subnet id for agent pools. Note agent need to used the same vnet if master set. Default: ""')
register_cli_argument('acs create', 'agent_ports', type=validate_list_of_integers, help=_get_feature_in_preview_message() + 'Set the default ports exposed on the agent pools. Only usable for non-Kubernetes. Default: 8080,4000,80')
register_cli_argument('acs create', 'agent_storage_profile', help=_get_feature_in_preview_message() + 'Set default storage profile for agent pools. Default: varies based on Orchestrator', **enum_choice_list(storageProfileTypes))
register_cli_argument('acs create', 'windows', action='store_true', help='If true, set the default osType of agent pools to be Windows.')
register_cli_argument('acs create', 'validate', action='store_true', help='Generate and validate the ARM template without creating any resources')
register_cli_argument('acs create', 'orchestrator_version', options_list=('--orchestrator-version',), help=_get_feature_in_preview_message() + 'Use Orchestrator Version to specify the semantic version for your choice of orchestrator.')
register_cli_argument('acs', 'disable_browser', help='Do not open browser after opening a proxy to the cluster web user interface')
register_cli_argument('acs dcos browse', 'name', name_arg_type)
register_cli_argument('acs dcos browse', 'ssh_key_file',
required=False,
help='Path to an SSH key file to use.',
type=file_type,
default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter())
register_cli_argument('acs dcos install-cli', 'install_location',
options_list=('--install-location',),
default=_get_default_install_location('dcos'))
register_cli_argument('acs kubernetes install-cli', 'install_location',
options_list=('--install-location',),
default=_get_default_install_location('kubectl'))
# TODO: Make this derive from the cluster object, instead of just preset values
register_cli_argument('acs kubernetes get-credentials', 'dns_prefix')
register_cli_argument('acs kubernetes get-credentials', 'location')
register_cli_argument('acs kubernetes get-credentials', 'path',
options_list=('--file', '-f',),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
type=file_type,
completer=FilesCompleter())
register_cli_argument('acs kubernetes get-credentials', 'ssh_key_file',
required=False,
help='Path to an SSH key file to use.',
type=file_type,
default=os.path.join('~', '.ssh', 'id_rsa'),
completer=FilesCompleter())
register_cli_argument('acs scale', 'new_agent_count', type=int, help='The number of agents for the cluster')
register_cli_argument('acs create', 'service_principal', help='Service principal for making calls into Azure APIs. If not set, auto generate a new service principal of Contributor role, and save it locally for reusing')
register_cli_argument('acs create', 'client_secret', help='Client secret to use with the service principal for making calls to Azure APIs')
# Managed Clusters flags configuration
register_cli_argument('aks', 'name', help='Resource name for the managed cluster', arg_type=name_arg_type)
register_cli_argument('aks', 'resource_group', arg_type=resource_group_name_type)
register_cli_argument('aks', 'tags', tags_type)
register_cli_argument('aks create', 'ssh_key_value', required=False,
help='SSH key file value or key file path.', type=file_type,
default=os.path.join('~', '.ssh', 'id_rsa.pub'), completer=FilesCompleter(),
validator=validate_ssh_key)
register_cli_argument('aks create', 'name', arg_type=name_arg_type, validator=validate_linux_host_name)
register_extra_cli_argument('aks create', 'generate_ssh_keys', action='store_true',
help='Generate SSH public and private key files if missing',
validator=validate_create_parameters)
register_cli_argument('aks create', 'kubernetes_version', arg_type=k8s_version_arg_type,
validator=validate_k8s_version)
register_cli_argument('aks create', 'admin_username', options_list=('--admin-username', '-u'))
register_cli_argument('aks create', 'node_vm_size', options_list=('--node-vm-size', '-s'),
completer=get_vm_size_completion_list)
register_cli_argument('aks create', 'node_count', type=int, options_list=('--node-count', '-c'))
register_cli_argument('aks create', 'dns_name_prefix', options_list=('--dns-name-prefix', '-p'))
register_cli_argument('aks delete', 'resource_name', help='Resource name for the managed cluster', arg_type=name_arg_type)
register_cli_argument('aks get-credentials', 'path', options_list=('--file', '-f',),
default=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
type=file_type, completer=FilesCompleter())
register_cli_argument('aks get-credentials', 'admin', options_list=('--admin', '-a'), default=False)
register_cli_argument('aks get-versions', 'resource_name', help='Resource name for the managed cluster', arg_type=name_arg_type)
register_cli_argument('aks scale', 'node_count', type=int, options_list=('--node-count', '-c'))
register_cli_argument('aks upgrade', 'kubernetes_version', arg_type=k8s_version_arg_type,
validator=validate_k8s_version)
register_cli_argument('aks upgrade', 'name', arg_type=name_arg_type, validator=validate_linux_host_name)
register_cli_argument('aks wait', 'resource_name', options_list=('--name', '-n'))
register_cli_argument('aks install-cli', 'install_location', options_list=('--install-location',),
default=_get_default_install_location('kubectl'))
register_cli_argument('aks install-cli', 'client_version', options_list=('--client-version',),
validator=validate_k8s_client_version)
| [] | [] | ["ProgramFiles"] | [] | ["ProgramFiles"] | python | 1 | 0 | |
python/setup.py | import argparse
import errno
import glob
import io
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile
from itertools import chain
from itertools import takewhile
import urllib.error
import urllib.parse
import urllib.request
logger = logging.getLogger(__name__)
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
SUPPORTED_PYTHONS = [(3, 6), (3, 7), (3, 8)]
SUPPORTED_BAZEL = (3, 2, 0)
ROOT_DIR = os.path.dirname(__file__)
BUILD_JAVA = os.getenv("RAY_INSTALL_JAVA") == "1"
PICKLE5_SUBDIR = os.path.join("ray", "pickle5_files")
THIRDPARTY_SUBDIR = os.path.join("ray", "thirdparty_files")
CLEANABLE_SUBDIRS = [PICKLE5_SUBDIR, THIRDPARTY_SUBDIR]
exe_suffix = ".exe" if sys.platform == "win32" else ""
# .pyd is the extension Python requires on Windows for shared libraries.
# https://docs.python.org/3/faq/windows.html#is-a-pyd-file-the-same-as-a-dll
pyd_suffix = ".pyd" if sys.platform == "win32" else ".so"
pickle5_url = ("https://github.com/pitrou/pickle5-backport/archive/"
"c0c1a158f59366696161e0dffdd10cfe17601372.tar.gz")
# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
ray_files = [
"ray/core/src/ray/thirdparty/redis/src/redis-server" + exe_suffix,
"ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
"ray/core/src/plasma/plasma_store_server" + exe_suffix,
"ray/_raylet" + pyd_suffix,
"ray/core/src/ray/gcs/gcs_server" + exe_suffix,
"ray/core/src/ray/raylet/raylet" + exe_suffix,
"ray/streaming/_streaming.so",
]
if BUILD_JAVA:
ray_files.append("ray/jars/ray_dist.jar")
# These are the directories where automatically generated Python protobuf
# bindings are created.
generated_python_directories = [
"ray/core/generated",
"ray/streaming/generated",
]
optional_ray_files = ["ray/nightly-wheels.yaml"]
ray_autoscaler_files = [
"ray/autoscaler/aws/example-full.yaml",
"ray/autoscaler/azure/example-full.yaml",
"ray/autoscaler/azure/azure-vm-template.json",
"ray/autoscaler/azure/azure-config-template.json",
"ray/autoscaler/gcp/example-full.yaml",
"ray/autoscaler/local/example-full.yaml",
"ray/autoscaler/kubernetes/example-full.yaml",
"ray/autoscaler/kubernetes/kubectl-rsync.sh",
"ray/autoscaler/ray-schema.json"
]
ray_project_files = [
"ray/projects/schema.json", "ray/projects/templates/cluster_template.yaml",
"ray/projects/templates/project_template.yaml",
"ray/projects/templates/requirements.txt"
]
ray_dashboard_files = [
os.path.join(dirpath, filename)
for dirpath, dirnames, filenames in os.walk("ray/dashboard/client/build")
for filename in filenames
]
optional_ray_files += ray_autoscaler_files
optional_ray_files += ray_project_files
optional_ray_files += ray_dashboard_files
if os.getenv("RAY_USE_NEW_GCS") == "on":
ray_files += [
"ray/core/src/credis/build/src/libmember.so",
"ray/core/src/credis/build/src/libmaster.so",
"ray/core/src/credis/redis/src/redis-server" + exe_suffix,
]
# If you're adding dependencies for ray extras, please
# also update the matching section of requirements.txt
# in this directory
extras = {
"debug": [],
"serve": ["uvicorn", "flask", "blist", "requests"],
"tune": ["tabulate", "tensorboardX", "pandas"]
}
extras["rllib"] = extras["tune"] + [
"atari_py",
"dm_tree",
"gym[atari]",
"lz4",
"opencv-python-headless<=4.3.0.36",
"pyyaml",
"scipy",
]
extras["streaming"] = []
extras["all"] = list(set(chain.from_iterable(extras.values())))
# These are the main dependencies for users of ray. This list
# should be carefully curated. If you change it, please reflect
# the change in the matching section of requirements.txt
install_requires = [
"aiohttp",
"aiohttp_cors",
"aioredis",
"click >= 7.0",
"colorama",
"colorful",
"filelock",
"google",
"gpustat",
"grpcio >= 1.28.1",
"jsonschema",
"msgpack >= 1.0.0, < 2.0.0",
"numpy >= 1.16",
"protobuf >= 3.8.0",
"py-spy >= 0.2.0",
"pyyaml",
"requests",
"redis >= 3.3.2, < 3.5.0",
"opencensus",
"prometheus_client >= 0.7.1",
]
def is_native_windows_or_msys():
"""Check to see if we are running on native Windows,
but NOT WSL (which is seen as Linux)."""
return sys.platform == "msys" or sys.platform == "win32"
def is_invalid_windows_platform():
# 'GCC' check is how you detect MinGW:
# https://github.com/msys2/MINGW-packages/blob/abd06ca92d876b9db05dd65f27d71c4ebe2673a9/mingw-w64-python2/0410-MINGW-build-extensions-with-GCC.patch#L53
platform = sys.platform
ver = sys.version
return platform == "msys" or (platform == "win32" and ver and "GCC" in ver)
# Calls Bazel in PATH, falling back to the standard user installation path
# (~/.bazel/bin/bazel) if it isn't found.
def bazel_invoke(invoker, cmdline, *args, **kwargs):
home = os.path.expanduser("~")
candidates = ["bazel"]
if sys.platform == "win32":
mingw_dir = os.getenv("MINGW_DIR")
if mingw_dir:
candidates.append(mingw_dir + "/bin/bazel.exe")
else:
candidates.append(os.path.join(home, ".bazel", "bin", "bazel"))
result = None
for i, cmd in enumerate(candidates):
try:
result = invoker([cmd] + cmdline, *args, **kwargs)
break
except IOError:
if i >= len(candidates) - 1:
raise
return result
def download(url):
try:
result = urllib.request.urlopen(url).read()
except urllib.error.URLError:
# This fallback is necessary on Python 3.5 on macOS due to TLS 1.2.
curl_args = ["curl", "-s", "-L", "-f", "-o", "-", url]
result = subprocess.check_output(curl_args)
return result
# Installs pickle5-backport into the local subdirectory.
def download_pickle5(pickle5_dir):
pickle5_file = urllib.parse.unquote(
urllib.parse.urlparse(pickle5_url).path)
pickle5_name = re.sub("\\.tar\\.gz$", ".tgz", pickle5_file, flags=re.I)
url_path_parts = os.path.splitext(pickle5_name)[0].split("/")
(project, commit) = (url_path_parts[2], url_path_parts[4])
pickle5_archive = download(pickle5_url)
with tempfile.TemporaryDirectory() as work_dir:
tf = tarfile.open(None, "r", io.BytesIO(pickle5_archive))
try:
tf.extractall(work_dir)
finally:
tf.close()
src_dir = os.path.join(work_dir, project + "-" + commit)
args = [sys.executable, "setup.py", "-q", "bdist_wheel"]
subprocess.check_call(args, cwd=src_dir)
for wheel in glob.glob(os.path.join(src_dir, "dist", "*.whl")):
wzf = zipfile.ZipFile(wheel, "r")
try:
wzf.extractall(pickle5_dir)
finally:
wzf.close()
def build(build_python, build_java):
if tuple(sys.version_info[:2]) not in SUPPORTED_PYTHONS:
msg = ("Detected Python version {}, which is not supported. "
"Only Python {} are supported.").format(
".".join(map(str, sys.version_info[:2])),
", ".join(".".join(map(str, v)) for v in SUPPORTED_PYTHONS))
raise RuntimeError(msg)
if is_invalid_windows_platform():
msg = ("Please use official native CPython on Windows,"
" not Cygwin/MSYS/MSYS2/MinGW/etc.\n" +
"Detected: {}\n at: {!r}".format(sys.version, sys.executable))
raise OSError(msg)
bazel_env = dict(os.environ, PYTHON3_BIN_PATH=sys.executable)
if is_native_windows_or_msys():
SHELL = bazel_env.get("SHELL")
if SHELL:
bazel_env.setdefault("BAZEL_SH", os.path.normpath(SHELL))
BAZEL_SH = bazel_env["BAZEL_SH"]
SYSTEMROOT = os.getenv("SystemRoot")
wsl_bash = os.path.join(SYSTEMROOT, "System32", "bash.exe")
if (not BAZEL_SH) and SYSTEMROOT and os.path.isfile(wsl_bash):
msg = ("You appear to have Bash from WSL,"
" which Bazel may invoke unexpectedly. "
"To avoid potential problems,"
" please explicitly set the {name!r}"
" environment variable for Bazel.").format(name="BAZEL_SH")
raise RuntimeError(msg)
# Check if the current Python already has pickle5 (either comes with newer
# Python versions, or has been installed by us before).
pickle5 = None
if sys.version_info >= (3, 8, 2):
import pickle as pickle5
else:
try:
import pickle5
except ImportError:
pass
if not pickle5:
download_pickle5(os.path.join(ROOT_DIR, PICKLE5_SUBDIR))
# Note: We are passing in sys.executable so that we use the same
# version of Python to build packages inside the build.sh script. Note
# that certain flags will not be passed along such as --user or sudo.
# TODO(rkn): Fix this.
if not os.getenv("SKIP_THIRDPARTY_INSTALL"):
pip_packages = ["psutil", "setproctitle"]
subprocess.check_call(
[
sys.executable, "-m", "pip", "install", "-q",
"--target=" + os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
] + pip_packages,
env=dict(os.environ, CC="gcc"))
version_info = bazel_invoke(subprocess.check_output, ["--version"])
bazel_version_str = version_info.rstrip().decode("utf-8").split(" ", 1)[1]
bazel_version_split = bazel_version_str.split(".")
bazel_version_digits = [
"".join(takewhile(str.isdigit, s)) for s in bazel_version_split
]
bazel_version = tuple(map(int, bazel_version_digits))
if bazel_version < SUPPORTED_BAZEL:
logger.warning("Expected Bazel version {} but found {}".format(
".".join(map(str, SUPPORTED_BAZEL)), bazel_version_str))
bazel_targets = []
bazel_targets += ["//:ray_pkg"] if build_python else []
bazel_targets += ["//java:ray_java_pkg"] if build_java else []
return bazel_invoke(
subprocess.check_call,
["build", "--verbose_failures", "--"] + bazel_targets,
env=bazel_env)
def walk_directory(directory):
file_list = []
for (root, dirs, filenames) in os.walk(directory):
for name in filenames:
file_list.append(os.path.join(root, name))
return file_list
def move_file(target_dir, filename):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
# example.
source = filename
destination = os.path.join(target_dir, filename)
# Create the target directory if it doesn't already exist.
os.makedirs(os.path.dirname(destination), exist_ok=True)
if not os.path.exists(destination):
print("Copying {} to {}.".format(source, destination))
if sys.platform == "win32":
# Does not preserve file mode (needed to avoid read-only bit)
shutil.copyfile(source, destination, follow_symlinks=True)
else:
# Preserves file mode (needed to copy executable bit)
shutil.copy(source, destination, follow_symlinks=True)
def find_version(*filepath):
# Extract version information from filepath
with open(os.path.join(ROOT_DIR, *filepath)) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def pip_run(build_ext):
build(True, BUILD_JAVA)
files_to_include = list(ray_files)
# We also need to install pickle5 along with Ray, so make sure that the
# relevant non-Python pickle5 files get copied.
pickle5_dir = os.path.join(ROOT_DIR, PICKLE5_SUBDIR)
files_to_include += walk_directory(os.path.join(pickle5_dir, "pickle5"))
thirdparty_dir = os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
files_to_include += walk_directory(thirdparty_dir)
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
files_to_include.append(os.path.join(directory, filename))
for filename in files_to_include:
move_file(build_ext.build_lib, filename)
# Try to copy over the optional files.
for filename in optional_ray_files:
try:
move_file(build_ext.build_lib, filename)
except Exception:
print("Failed to copy optional file {}. This is ok."
.format(filename))
def api_main(program, *args):
parser = argparse.ArgumentParser()
choices = ["build", "bazel_version", "python_versions", "clean", "help"]
parser.add_argument("command", type=str, choices=choices)
parser.add_argument(
"-l",
"--language",
default="python",
type=str,
help="A list of languages to build native libraries. "
"Supported languages include \"python\" and \"java\". "
"If not specified, only the Python library will be built.")
parsed_args = parser.parse_args(args)
result = None
if parsed_args.command == "build":
kwargs = dict(build_python=False, build_java=False)
for lang in parsed_args.language.split(","):
if "python" in lang:
kwargs.update(build_python=True)
elif "java" in lang:
kwargs.update(build_java=True)
else:
raise ValueError("invalid language: {!r}".format(lang))
result = build(**kwargs)
elif parsed_args.command == "bazel_version":
print(".".join(map(str, SUPPORTED_BAZEL)))
elif parsed_args.command == "python_versions":
for version in SUPPORTED_PYTHONS:
# NOTE: On Windows this will print "\r\n" on the command line.
# Strip it out by piping to tr -d "\r".
print(".".join(map(str, version)))
elif parsed_args.command == "clean":
def onerror(function, path, excinfo):
nonlocal result
if excinfo[1].errno != errno.ENOENT:
msg = excinfo[1].strerror
logger.error("cannot remove {}: {}".format(path, msg))
result = 1
for subdir in CLEANABLE_SUBDIRS:
shutil.rmtree(os.path.join(ROOT_DIR, subdir), onerror=onerror)
elif parsed_args.command == "help":
parser.print_help()
else:
raise ValueError("Invalid command: {!r}".format(parsed_args.command))
return result
if __name__ == "__api__":
api_main(*sys.argv)
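# Illustrative invocations of the helper API above (assuming the caller exec()s this
# file with __name__ set to "__api__"); the command names and the comma-separated
# language list follow the argparse choices defined in api_main():
#
#   api_main("setup.py", "build", "-l", "python,java")   # build both native libraries
#   api_main("setup.py", "bazel_version")                # print the supported Bazel version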
if __name__ == "__main__":
import setuptools
import setuptools.command.build_ext
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
return pip_run(self)
class BinaryDistribution(setuptools.Distribution):
def has_ext_modules(self):
return True
setuptools.setup(
name="ray",
version=find_version("ray", "__init__.py"),
author="Ray Team",
author_email="[email protected]",
description=("A system for parallel and distributed Python that "
"unifies the ML ecosystem."),
long_description=io.open(
os.path.join(ROOT_DIR, os.path.pardir, "README.rst"),
"r",
encoding="utf-8").read(),
url="https://github.com/ray-project/ray",
keywords=("ray distributed parallel machine-learning "
"reinforcement-learning deep-learning python"),
packages=setuptools.find_packages(),
cmdclass={"build_ext": build_ext},
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=install_requires,
setup_requires=["cython >= 0.29.14", "wheel"],
extras_require=extras,
entry_points={
"console_scripts": [
"ray=ray.scripts.scripts:main",
"rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
]
},
include_package_data=True,
zip_safe=False,
license="Apache 2.0") if __name__ == "__main__" else None
| [] | [] | ["SystemRoot", "RAY_INSTALL_JAVA", "SKIP_THIRDPARTY_INSTALL", "RAY_USE_NEW_GCS", "MINGW_DIR"] | [] | ["SystemRoot", "RAY_INSTALL_JAVA", "SKIP_THIRDPARTY_INSTALL", "RAY_USE_NEW_GCS", "MINGW_DIR"] | python | 5 | 0 | |
Tests/scripts/gitlab_slack_notifier.py | import argparse
import logging
import os
from typing import Tuple, Optional
import gitlab
from slack import WebClient as SlackClient
from Tests.Marketplace.marketplace_services import get_upload_data
from Tests.Marketplace.marketplace_constants import BucketUploadFlow
from Tests.scripts.utils.log_util import install_logging
DEMISTO_GREY_ICON = 'https://3xqz5p387rui1hjtdv1up7lw-wpengine.netdna-ssl.com/wp-content/' \
'uploads/2018/07/Demisto-Icon-Dark.png'
ROOT_ARTIFACTS_FOLDER = os.getenv('ARTIFACTS_FOLDER', './artifacts')
ARTIFACTS_FOLDER_XSOAR = os.getenv('ARTIFACTS_FOLDER_XSOAR', './artifacts/xsoar')
ARTIFACTS_FOLDER_MPV2 = os.getenv('ARTIFACTS_FOLDER_MPV2', './artifacts/marketplacev2')
CONTENT_CHANNEL = 'dmst-content-team'
GITLAB_PROJECT_ID = os.getenv('CI_PROJECT_ID') or 2596 # the default is the id of the content repo in code.pan.run
GITLAB_SERVER_URL = os.getenv('CI_SERVER_URL', 'https://code.pan.run') # disable-secrets-detection
CONTENT_NIGHTLY = 'Content Nightly'
BUCKET_UPLOAD = 'Upload Packs to Marketplace Storage'
BUCKET_V2_UPLOAD = 'Upload Packs to Marketplace v2 Storage'
SDK_NIGHTLY = 'Demisto SDK Nightly'
PRIVATE_NIGHTLY = 'Private Nightly'
WORKFLOW_TYPES = {CONTENT_NIGHTLY, SDK_NIGHTLY, BUCKET_UPLOAD, PRIVATE_NIGHTLY, BUCKET_V2_UPLOAD}
def options_handler():
parser = argparse.ArgumentParser(description='Parser for slack_notifier args')
parser.add_argument('-u', '--url', help='The gitlab server url', default=GITLAB_SERVER_URL)
parser.add_argument('-p', '--pipeline_id', help='The pipeline id to check the status of', required=True)
parser.add_argument('-s', '--slack_token', help='The token for slack', required=True)
parser.add_argument('-c', '--ci_token', help='The token for circleci/gitlab', required=True)
parser.add_argument(
'-ch', '--slack_channel', help='The slack channel in which to send the notification', default=CONTENT_CHANNEL
)
parser.add_argument('-gp', '--gitlab_project_id', help='The gitlab project id', default=GITLAB_PROJECT_ID)
parser.add_argument(
'-tw', '--triggering-workflow', help='The type of ci pipeline workflow the notifier is reporting on',
choices=WORKFLOW_TYPES)
options = parser.parse_args()
return options
def get_artifact_data(artifact_folder, artifact_relative_path: str) -> Optional[str]:
"""
Retrieves artifact data according to the artifact relative path from 'ARTIFACTS_FOLDER' given.
Args:
artifact_folder (str): Full path of the artifact root folder.
artifact_relative_path (str): Relative path of an artifact file.
Returns:
(Optional[str]): data of the artifact as str if exists, None otherwise.
"""
artifact_data = None
try:
file_name = os.path.join(artifact_folder, artifact_relative_path)
if os.path.isfile(file_name):
logging.info(f'Extracting {artifact_relative_path}')
with open(file_name, 'r') as file_data:
artifact_data = file_data.read()
else:
logging.info(f'Did not find {artifact_relative_path} file')
except Exception:
logging.exception(f'Error getting {artifact_relative_path} file')
return artifact_data
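# Illustrative usage (mirrors the callers below; the artifact file names are whatever
# earlier pipeline jobs produced, e.g. the lint report):
#   data = get_artifact_data(ROOT_ARTIFACTS_FOLDER, 'failed_lint_report.txt')
#   failing = data.split('\n') if data else []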
def test_playbooks_results(artifact_folder):
failed_tests_data = get_artifact_data(artifact_folder, 'failed_tests.txt')
failed_tests = failed_tests_data.split('\n') if failed_tests_data else []
skipped_tests_data = get_artifact_data(artifact_folder, 'skipped_tests.txt')
skipped_tests = skipped_tests_data.split('\n') if skipped_tests_data else []
    # assumes the skipped-integrations artifact is named 'skipped_integrations.txt'
    skipped_integrations_data = get_artifact_data(artifact_folder, 'skipped_integrations.txt')
skipped_integrations = skipped_integrations_data.split('\n') if skipped_integrations_data else []
content_team_fields = []
if failed_tests:
field_failed_tests = {
"title": "Failed tests - ({})".format(len(failed_tests)),
"value": '\n'.join(failed_tests),
"short": False
}
content_team_fields.append(field_failed_tests)
if skipped_tests:
field_skipped_tests = {
"title": "Skipped tests - ({})".format(len(skipped_tests)),
"value": '',
"short": True
}
content_team_fields.append(field_skipped_tests)
if skipped_integrations:
field_skipped_integrations = {
"title": "Skipped integrations - ({})".format(len(skipped_integrations)),
"value": '',
"short": True
}
content_team_fields.append(field_skipped_integrations)
return content_team_fields
def unit_tests_results():
failing_tests = get_artifact_data(ROOT_ARTIFACTS_FOLDER, 'failed_lint_report.txt')
slack_results = []
if failing_tests:
failing_test_list = failing_tests.split('\n')
slack_results.append({
"title": f'{"Failed Unit Tests"} - ({len(failing_test_list)})',
"value": '\n'.join(failing_test_list),
"short": False
})
return slack_results
def bucket_upload_results(bucket_artifact_folder):
steps_fields = []
pack_results_path = os.path.join(bucket_artifact_folder, BucketUploadFlow.PACKS_RESULTS_FILE)
logging.info(f'retrieving upload data from "{pack_results_path}"')
successful_packs, failed_packs, successful_private_packs, _ = get_upload_data(
pack_results_path, BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
)
if successful_packs:
steps_fields += [{
"title": "Successful Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*successful_packs}], key=lambda s: s.lower())),
"short": False
}]
if failed_packs:
steps_fields += [{
"title": "Failed Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*failed_packs}], key=lambda s: s.lower())),
"short": False
}]
if successful_private_packs:
steps_fields += [{
"title": "Successful Private Packs:",
"value": "\n".join(sorted([pack_name for pack_name in {*successful_private_packs}],
key=lambda s: s.lower())),
"short": False
}]
return steps_fields
def construct_slack_msg(triggering_workflow, pipeline_url, pipeline_failed_jobs) -> list:
title = triggering_workflow
if pipeline_failed_jobs:
title += ' - Failure'
color = 'danger'
else:
title += ' - Success'
color = 'good'
# report failing jobs
content_fields = []
failed_jobs_names = {job.name for job in pipeline_failed_jobs}
if failed_jobs_names:
content_fields.append({
"title": f'Failed Jobs - ({len(failed_jobs_names)})',
"value": '\n'.join(failed_jobs_names),
"short": False
})
# report failing unit-tests
triggering_workflow_lower = triggering_workflow.lower()
check_unittests_substrings = {'lint', 'unit', 'demisto sdk nightly'}
failed_jobs_or_workflow_title = {job_name.lower() for job_name in failed_jobs_names}
failed_jobs_or_workflow_title.add(triggering_workflow_lower)
for means_include_unittests_results in failed_jobs_or_workflow_title:
if any({substr in means_include_unittests_results for substr in check_unittests_substrings}):
content_fields += unit_tests_results()
break
# report pack updates
if 'upload' in triggering_workflow_lower:
content_fields += bucket_upload_results(ARTIFACTS_FOLDER_XSOAR)
content_fields += bucket_upload_results(ARTIFACTS_FOLDER_MPV2)
# report failing test-playbooks
if 'content nightly' in triggering_workflow_lower:
content_fields += test_playbooks_results(ARTIFACTS_FOLDER_XSOAR)
slack_msg = [{
'fallback': title,
'color': color,
'title': title,
'title_link': pipeline_url,
'fields': content_fields
}]
return slack_msg
def collect_pipeline_data(gitlab_client, project_id, pipeline_id) -> Tuple[str, list]:
project = gitlab_client.projects.get(int(project_id))
pipeline = project.pipelines.get(int(pipeline_id))
jobs = pipeline.jobs.list()
failed_jobs = []
for job in jobs:
logging.info(f'status of gitlab job with id {job.id} and name {job.name} is {job.status}')
if job.status == 'failed':
logging.info(f'collecting failed job {job.name}')
logging.info(f'pipeline associated with failed job is {job.pipeline.get("web_url")}')
failed_jobs.append(job)
return pipeline.web_url, failed_jobs
def main():
install_logging('Slack_Notifier.log')
options = options_handler()
server_url = options.url
slack_token = options.slack_token
ci_token = options.ci_token
project_id = options.gitlab_project_id
pipeline_id = options.pipeline_id
triggering_workflow = options.triggering_workflow # ci workflow type that is triggering the slack notifier
slack_channel = options.slack_channel
gitlab_client = gitlab.Gitlab(server_url, private_token=ci_token)
pipeline_url, pipeline_failed_jobs = collect_pipeline_data(gitlab_client, project_id, pipeline_id)
slack_msg_data = construct_slack_msg(triggering_workflow, pipeline_url, pipeline_failed_jobs)
slack_client = SlackClient(slack_token)
username = 'Content GitlabCI'
slack_client.api_call(
"chat.postMessage",
json={
'channel': slack_channel,
'username': username,
'as_user': 'False',
'attachments': slack_msg_data
}
)
if __name__ == '__main__':
main()
| [] | [] | ["ARTIFACTS_FOLDER_XSOAR", "CI_SERVER_URL", "CI_PROJECT_ID", "ARTIFACTS_FOLDER", "ARTIFACTS_FOLDER_MPV2"] | [] | ["ARTIFACTS_FOLDER_XSOAR", "CI_SERVER_URL", "CI_PROJECT_ID", "ARTIFACTS_FOLDER", "ARTIFACTS_FOLDER_MPV2"] | python | 5 | 0 | |
src/item/command.py | import os
import discord
from discord.ext import commands
from src.common.utils import InputParser, number_to_emoji, emoji_to_number
from src.item.embed import ItemEmbed
from src.common.embed import CommonEmbed
from src.orm.queries.item import db_item
from src.orm.queries.header import db_header
class ItemCog(commands.Cog):
"""
Commands related to Item
...
Attributes
----------
bot : commands.Bot
Discord.ext class that implements Bot class
Methods
-------
item(ctx, *args)
Retrieve embeds related to command '?item'
"""
def __init__(self, bot: commands.Bot):
self._bot = bot
self.name = 'Item Cog'
        self.description = '''Item commands for the MH Rise Wiki'''
self.__dbItem = db_item
self.__dbHeader = db_header
self._item_img_route = os.getenv('ITEM_IMG_ROUTE')
self._map_img_route = os.getenv('ITEM_LOCATION_ROUTE')
@commands.command(name='item')
async def item(self, ctx: commands.Context, *args):
"""Manage rendered embed of command '?item'
Parameters
----------
ctx : commands.Context
context class that store data related to discord server
*args : list
List of params sent when the command was called
Returns
-------
Message
retrieve rendered embed
"""
item_name = InputParser(args).concat()
dct = self.__dbItem.get_item(str(ctx.guild.id), item_name)
        if dct is None:
            dct = self.__dbHeader.entity_not_found(str(ctx.guild.id), 'item_not_found')
            footer = self.__dbHeader.get_footer(str(ctx.guild.id), 'general_footer')
            embed = CommonEmbed(dct, footer, ctx)
await ctx.send(embed=embed.notFound())
else:
headers = self.__dbHeader.get_headers(str(ctx.guild.id), ctx.invoked_with)
thumbnail_file = discord.File(self._item_img_route+dct['icon'], filename=dct['icon'])
embed = ItemEmbed(dct, headers)
embed_main, maps_embeds = embed.main()
if len(maps_embeds) == 0:
await ctx.send(embed = embed_main, file=thumbnail_file)
else:
message = await ctx.send(embed = embed_main, file=thumbnail_file)
valid_reactions = []
for k in range(0,len(maps_embeds)):
await message.add_reaction(number_to_emoji(k+1))
valid_reactions.append(number_to_emoji(k+1))
def check(reaction, user):
return user == ctx.author
reaction = None
reaction_used = []
while True:
if str(reaction) in valid_reactions and str(reaction) not in reaction_used:
i = emoji_to_number(str(reaction))
reaction_used.append(str(reaction))
map_file = discord.File(self._map_img_route+maps_embeds[i-1]['map-img'],
filename=maps_embeds[i-1]['map-img'])
await ctx.send(embed=maps_embeds[i-1]['embed'], file=map_file)
try:
reaction, user = await self._bot.wait_for(event='reaction_add', timeout = 60.0, check = check)
await message.remove_reaction(reaction, user)
except:
break
await message.clear_reactions()
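# Illustrative wiring (not part of this module): the cog is normally registered on the
# bot instance at startup, e.g.
#   bot.add_cog(ItemCog(bot))
# (in discord.py 2.x the call is a coroutine: await bot.add_cog(ItemCog(bot)))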
| [] | [] | ["ITEM_IMG_ROUTE", "ITEM_LOCATION_ROUTE"] | [] | ["ITEM_IMG_ROUTE", "ITEM_LOCATION_ROUTE"] | python | 2 | 0 | |
cmd/integrationArtifactGetServiceEndpoint_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type integrationArtifactGetServiceEndpointOptions struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
IntegrationFlowID string `json:"integrationFlowId,omitempty"`
Platform string `json:"platform,omitempty"`
Host string `json:"host,omitempty"`
OAuthTokenProviderURL string `json:"oAuthTokenProviderUrl,omitempty"`
}
type integrationArtifactGetServiceEndpointCommonPipelineEnvironment struct {
custom struct {
iFlowServiceEndpoint string
}
}
func (p *integrationArtifactGetServiceEndpointCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "custom", name: "iFlowServiceEndpoint", value: p.custom.iFlowServiceEndpoint},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// IntegrationArtifactGetServiceEndpointCommand Get a deployed CPI integration flow service endpoint
func IntegrationArtifactGetServiceEndpointCommand() *cobra.Command {
const STEP_NAME = "integrationArtifactGetServiceEndpoint"
metadata := integrationArtifactGetServiceEndpointMetadata()
var stepConfig integrationArtifactGetServiceEndpointOptions
var startTime time.Time
var commonPipelineEnvironment integrationArtifactGetServiceEndpointCommonPipelineEnvironment
var createIntegrationArtifactGetServiceEndpointCmd = &cobra.Command{
Use: STEP_NAME,
		Short:             "Get a deployed CPI integration flow service endpoint",
Long: `With this step you can obtain information about the service endpoints exposed by SAP Cloud Platform Integration on a tenant using OData API. Learn more about the SAP Cloud Integration remote API for getting service endpoint of deployed integration artifact [here](https://help.sap.com/viewer/368c481cd6954bdfa5d0435479fd4eaf/Cloud/en-US/d1679a80543f46509a7329243b595bdb.html).`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
integrationArtifactGetServiceEndpoint(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addIntegrationArtifactGetServiceEndpointFlags(createIntegrationArtifactGetServiceEndpointCmd, &stepConfig)
return createIntegrationArtifactGetServiceEndpointCmd
}
func addIntegrationArtifactGetServiceEndpointFlags(cmd *cobra.Command, stepConfig *integrationArtifactGetServiceEndpointOptions) {
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password to authenticate to the SAP Cloud Platform Integration Service")
cmd.Flags().StringVar(&stepConfig.IntegrationFlowID, "integrationFlowId", os.Getenv("PIPER_integrationFlowId"), "Specifies the ID of the Integration Flow artifact")
cmd.Flags().StringVar(&stepConfig.Platform, "platform", os.Getenv("PIPER_platform"), "Specifies the running platform of the SAP Cloud platform integraion service")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.Flags().StringVar(&stepConfig.OAuthTokenProviderURL, "oAuthTokenProviderUrl", os.Getenv("PIPER_oAuthTokenProviderUrl"), "Specifies the oAuth Provider protocol and host address, including the port. Please provide in the format `<protocol>://<host>:<port>`. Supported protocols are `http` and `https`.")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("integrationFlowId")
cmd.MarkFlagRequired("host")
cmd.MarkFlagRequired("oAuthTokenProviderUrl")
}
// retrieve step metadata
func integrationArtifactGetServiceEndpointMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "integrationArtifactGetServiceEndpoint",
Aliases: []config.Alias{},
			Description: "Get a deployed CPI integration flow service endpoint",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cpiCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "integrationFlowId",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "platform",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "oAuthTokenProviderUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "custom/iFlowServiceEndpoint"},
},
},
},
},
},
}
return theMetaData
}
| ["\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_integrationFlowId\"", "\"PIPER_platform\"", "\"PIPER_host\"", "\"PIPER_oAuthTokenProviderUrl\""] | [] | ["PIPER_oAuthTokenProviderUrl", "PIPER_host", "PIPER_integrationFlowId", "PIPER_password", "PIPER_username", "PIPER_platform"] | [] | ["PIPER_oAuthTokenProviderUrl", "PIPER_host", "PIPER_integrationFlowId", "PIPER_password", "PIPER_username", "PIPER_platform"] | go | 6 | 0 | |
middleware/logger/logger.go | package logger
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/internal/bytebufferpool"
"github.com/gofiber/fiber/v2/internal/colorable"
"github.com/gofiber/fiber/v2/internal/fasttemplate"
"github.com/gofiber/fiber/v2/internal/isatty"
"github.com/valyala/fasthttp"
)
// Logger variables
const (
TagPid = "pid"
TagTime = "time"
TagReferer = "referer"
TagProtocol = "protocol"
TagPort = "port"
TagIP = "ip"
TagIPs = "ips"
TagHost = "host"
TagMethod = "method"
TagPath = "path"
TagURL = "url"
TagUA = "ua"
TagLatency = "latency"
TagStatus = "status"
TagResBody = "resBody"
TagQueryStringParams = "queryParams"
TagBody = "body"
TagBytesSent = "bytesSent"
TagBytesReceived = "bytesReceived"
TagRoute = "route"
TagError = "error"
// DEPRECATED: Use TagReqHeader instead
TagHeader = "header:"
TagReqHeader = "reqHeader:"
TagRespHeader = "respHeader:"
TagLocals = "locals:"
TagQuery = "query:"
TagForm = "form:"
TagCookie = "cookie:"
TagBlack = "black"
TagRed = "red"
TagGreen = "green"
TagYellow = "yellow"
TagBlue = "blue"
TagMagenta = "magenta"
TagCyan = "cyan"
TagWhite = "white"
TagReset = "reset"
)
// Color values
const (
cBlack = "\u001b[90m"
cRed = "\u001b[91m"
cGreen = "\u001b[92m"
cYellow = "\u001b[93m"
cBlue = "\u001b[94m"
cMagenta = "\u001b[95m"
cCyan = "\u001b[96m"
cWhite = "\u001b[97m"
cReset = "\u001b[0m"
)
// New creates a new middleware handler
func New(config ...Config) fiber.Handler {
// Set default config
cfg := configDefault(config...)
// Get timezone location
tz, err := time.LoadLocation(cfg.TimeZone)
if err != nil || tz == nil {
cfg.timeZoneLocation = time.Local
} else {
cfg.timeZoneLocation = tz
}
// Check if format contains latency
cfg.enableLatency = strings.Contains(cfg.Format, "${latency}")
// Create template parser
tmpl := fasttemplate.New(cfg.Format, "${", "}")
// Create correct timeformat
var timestamp atomic.Value
timestamp.Store(time.Now().In(cfg.timeZoneLocation).Format(cfg.TimeFormat))
// Update date/time every 750 milliseconds in a separate go routine
if strings.Contains(cfg.Format, "${time}") {
go func() {
for {
time.Sleep(cfg.TimeInterval)
timestamp.Store(time.Now().In(cfg.timeZoneLocation).Format(cfg.TimeFormat))
}
}()
}
// Set PID once
pid := strconv.Itoa(os.Getpid())
// Set variables
var (
once sync.Once
mu sync.Mutex
errHandler fiber.ErrorHandler
)
// If colors are enabled, check terminal compatibility
if cfg.enableColors {
cfg.Output = colorable.NewColorableStderr()
if os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stderr.Fd()) && !isatty.IsCygwinTerminal(os.Stderr.Fd())) {
cfg.Output = colorable.NewNonColorable(os.Stderr)
}
}
var errPadding = 15
var errPaddingStr = strconv.Itoa(errPadding)
// Return new handler
return func(c *fiber.Ctx) (err error) {
// Don't execute middleware if Next returns true
if cfg.Next != nil && cfg.Next(c) {
return c.Next()
}
// Set error handler once
once.Do(func() {
			// get the longest possible path
stack := c.App().Stack()
for m := range stack {
for r := range stack[m] {
if len(stack[m][r].Path) > errPadding {
errPadding = len(stack[m][r].Path)
errPaddingStr = strconv.Itoa(errPadding)
}
}
}
// override error handler
errHandler = c.App().ErrorHandler
})
var start, stop time.Time
// Set latency start time
if cfg.enableLatency {
start = time.Now()
}
// Handle request, store err for logging
chainErr := c.Next()
// Manually call error handler
if chainErr != nil {
if err := errHandler(c, chainErr); err != nil {
_ = c.SendStatus(fiber.StatusInternalServerError)
}
}
// Set latency stop time
if cfg.enableLatency {
stop = time.Now()
}
// Get new buffer
buf := bytebufferpool.Get()
// Default output when no custom Format or io.Writer is given
if cfg.enableColors {
// Format error if exist
formatErr := ""
if chainErr != nil {
formatErr = cRed + " | " + chainErr.Error() + cReset
}
// Format log to buffer
_, _ = buf.WriteString(fmt.Sprintf("%s |%s %3d %s| %7v | %15s |%s %-7s %s| %-"+errPaddingStr+"s %s\n",
timestamp.Load().(string),
statusColor(c.Response().StatusCode()), c.Response().StatusCode(), cReset,
stop.Sub(start).Round(time.Millisecond),
c.IP(),
methodColor(c.Method()), c.Method(), cReset,
c.Path(),
formatErr,
))
// Write buffer to output
_, _ = cfg.Output.Write(buf.Bytes())
// Put buffer back to pool
bytebufferpool.Put(buf)
// End chain
return nil
}
// Loop over template tags to replace it with the correct value
_, err = tmpl.ExecuteFunc(buf, func(w io.Writer, tag string) (int, error) {
switch tag {
case TagTime:
return buf.WriteString(timestamp.Load().(string))
case TagReferer:
return buf.WriteString(c.Get(fiber.HeaderReferer))
case TagProtocol:
return buf.WriteString(c.Protocol())
case TagPid:
return buf.WriteString(pid)
case TagPort:
return buf.WriteString(c.Port())
case TagIP:
return buf.WriteString(c.IP())
case TagIPs:
return buf.WriteString(c.Get(fiber.HeaderXForwardedFor))
case TagHost:
return buf.WriteString(c.Hostname())
case TagPath:
return buf.WriteString(c.Path())
case TagURL:
return buf.WriteString(c.OriginalURL())
case TagUA:
return buf.WriteString(c.Get(fiber.HeaderUserAgent))
case TagLatency:
return buf.WriteString(stop.Sub(start).String())
case TagBody:
return buf.Write(c.Body())
case TagBytesReceived:
return appendInt(buf, len(c.Request().Body()))
case TagBytesSent:
return appendInt(buf, len(c.Response().Body()))
case TagRoute:
return buf.WriteString(c.Route().Path)
case TagStatus:
return appendInt(buf, c.Response().StatusCode())
case TagResBody:
return buf.Write(c.Response().Body())
case TagQueryStringParams:
return buf.WriteString(c.Request().URI().QueryArgs().String())
case TagMethod:
return buf.WriteString(c.Method())
case TagBlack:
return buf.WriteString(cBlack)
case TagRed:
return buf.WriteString(cRed)
case TagGreen:
return buf.WriteString(cGreen)
case TagYellow:
return buf.WriteString(cYellow)
case TagBlue:
return buf.WriteString(cBlue)
case TagMagenta:
return buf.WriteString(cMagenta)
case TagCyan:
return buf.WriteString(cCyan)
case TagWhite:
return buf.WriteString(cWhite)
case TagReset:
return buf.WriteString(cReset)
case TagError:
if chainErr != nil {
return buf.WriteString(chainErr.Error())
}
return buf.WriteString("-")
default:
// Check if we have a value tag i.e.: "reqHeader:x-key"
switch {
case strings.HasPrefix(tag, TagReqHeader):
return buf.WriteString(c.Get(tag[10:]))
case strings.HasPrefix(tag, TagHeader):
return buf.WriteString(c.Get(tag[7:]))
case strings.HasPrefix(tag, TagRespHeader):
return buf.WriteString(c.GetRespHeader(tag[11:]))
case strings.HasPrefix(tag, TagQuery):
return buf.WriteString(c.Query(tag[6:]))
case strings.HasPrefix(tag, TagForm):
return buf.WriteString(c.FormValue(tag[5:]))
case strings.HasPrefix(tag, TagCookie):
return buf.WriteString(c.Cookies(tag[7:]))
case strings.HasPrefix(tag, TagLocals):
switch v := c.Locals(tag[7:]).(type) {
case []byte:
return buf.Write(v)
case string:
return buf.WriteString(v)
case nil:
return 0, nil
default:
return buf.WriteString(fmt.Sprintf("%v", v))
}
}
}
return 0, nil
})
// Also write errors to the buffer
if err != nil {
_, _ = buf.WriteString(err.Error())
}
mu.Lock()
// Write buffer to output
if _, err := cfg.Output.Write(buf.Bytes()); err != nil {
// Write error to output
if _, err := cfg.Output.Write([]byte(err.Error())); err != nil {
// There is something wrong with the given io.Writer
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
mu.Unlock()
// Put buffer back to pool
bytebufferpool.Put(buf)
return nil
}
}
func appendInt(buf *bytebufferpool.ByteBuffer, v int) (int, error) {
old := len(buf.B)
buf.B = fasthttp.AppendUint(buf.B, v)
return len(buf.B) - old, nil
}
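// Example wiring (illustrative sketch, not part of this package's API surface): an
// application registers the middleware on a Fiber app, optionally with a custom
// format built from the tags defined above; with no Config the defaults from
// configDefault() apply.
//
//	app := fiber.New()
//	app.Use(logger.New(logger.Config{
//		Format: "${time} | ${status} | ${latency} | ${method} ${path}\n",
//	}))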
| ["\"TERM\""] | [] | ["TERM"] | [] | ["TERM"] | go | 1 | 0 | |
fight_simulator/config.py | import os
class DevelopmentConfig(object):
SQLALCHEMY_DATABASE_URI = "postgresql://localhost:5432/fight_simulator_db"
DEBUG = True
SECRET_KEY = os.environ.get("FIGHT_SIMULATOR_SECRET_KEY", os.urandom(12))
class TestingConfig(object):
SQLALCHEMY_DATABASE_URI = "postgresql://localhost:5432/fight_simulator_test"
DEBUG = False
SECRET_KEY = "Not secret"
class TravisConfig(object):
SQLALCHEMY_DATABASE_URI = "postgresql://localhost:5432/fight_simulator_test"
DEBUG = False
SECRET_KEY = "Not secret"
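# Illustrative use (not part of this module): a Flask app would typically select one of
# these classes through its config loader, e.g.
#   app.config.from_object("fight_simulator.config.DevelopmentConfig")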
| [] | [] | ["FIGHT_SIMULATOR_SECRET_KEY"] | [] | ["FIGHT_SIMULATOR_SECRET_KEY"] | python | 1 | 0 | |
evaluate3.py | import keras
from keras.models import load_model
import os
os.environ['KERAS_BACKEND' ] = 'tensorflow'
os.environ['MKL_THREADING_LAYER'] = 'GNU'
from agent3.agent import Agent
from functions import *
import sys
if len(sys.argv) != 3:
print("Usage: python evaluate.py [stock] [model]")
exit()
stock_name, model_name = sys.argv[1], sys.argv[2]
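# Illustrative invocation (hypothetical stock and model names; the model is loaded from
# "models/agent3" + model_name and the stock data via getStockDataVec(stock_name)):
#   python evaluate3.py GSPC_2011 model_ep100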
model = load_model("models/agent3" + model_name)
window_size = model.layers[0].input.shape.as_list()[1]
agent = Agent(window_size, True, model_name)
data = getStockDataVec(stock_name)
l = len(data) - 1
batch_size = 32
state = getState(data, 0, window_size + 1)
total_profit = 0
agent.inventory = []
starting_balance = 1500.00 #10.00000
print('starting balance {}'.format(starting_balance))
for t in range(l):
action = agent.act(state)
# sit
next_state = getState(data, t + 1, window_size + 1)
reward = 0
if action == 1: # buy
agent.inventory.append(data[t])
print("Buy: " + formatPrice(data[t]))
elif action == 2 and len(agent.inventory) > 0: # sell
bought_price = agent.inventory.pop(0)
reward = max(data[t] - bought_price, 0)
total_profit += data[t] - bought_price
print("Sell: " + formatPrice(data[t]) + " | Profit: " + formatPrice(data[t] - bought_price))
print("--------------------------------")
print("Total Profit: " + formatPrice(total_profit))
print("--------------------------------")
done = True if t == l - 1 else False
agent.memory.append((state, action, reward, next_state, done))
state = next_state
if done:
print("--------------------------------")
print(stock_name + " Total Profit: " + formatPrice(total_profit))
print("ENDING BALANCE ${:.2f}".format(starting_balance+total_profit))
print("--------------------------------")
| [] | [] | ["KERAS_BACKEND'", "MKL_THREADING_LAYER"] | [] | ["KERAS_BACKEND'", "MKL_THREADING_LAYER"] | python | 2 | 0 | |
internal/lsp/testdata/missingfunction/slice.go | package missingfunction
func slice() {
undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix")
}
| [] | [] | [] | [] | [] | go | null | null | null
examples/hello-world/order.go | package main
import (
"context"
"fmt"
"os"
"strconv"
"time"
"gopkg.in/alecthomas/kingpin.v2"
dapr "github.com/dapr/go-sdk/client"
)
const (
stateStoreName = `statestore`
daprPort = "3500"
)
var port string
func init() {
if port = os.Getenv("DAPR_GRPC_PORT"); len(port) == 0 {
port = daprPort
}
}
func main() {
var orderID int
put := kingpin.Command("put", "Send a new order.")
put.Flag("id", "order ID.").Default("1").IntVar(&orderID)
kingpin.Command("get", "Get current order.")
kingpin.Command("del", "Delete the order.")
kingpin.Command("seq", "Stream sequence of orders.")
// create the client
client, err := dapr.NewClientWithPort(port)
if err != nil {
panic(err)
}
defer client.Close()
ctx := context.Background()
switch kingpin.Parse() {
case "get":
fmt.Printf("Getting order\n")
item, err := client.GetState(ctx, stateStoreName, "order")
if err != nil {
fmt.Printf("Failed to get state: %v\n", err)
}
if len(item.Value) > 0 {
fmt.Printf("Order ID %s\n", item.Value)
} else {
fmt.Printf("Order Not Found\n")
}
case "put":
fmt.Printf("Sending order ID %d\n", orderID)
err := client.SaveState(ctx, stateStoreName, "order", []byte(strconv.Itoa(orderID)))
if err != nil {
fmt.Printf("Failed to persist state: %v\n", err)
} else {
fmt.Printf("Successfully persisted state\n")
}
case "del":
fmt.Printf("Deleting order\n")
err := client.DeleteState(ctx, stateStoreName, "order")
if err != nil {
fmt.Printf("Failed to delete state: %v\n", err)
} else {
fmt.Printf("Successfully deleted state\n")
}
case "seq":
fmt.Printf("Streaming sequence of orders\n")
for {
orderID++
err := client.SaveState(ctx, stateStoreName, "order", []byte(strconv.Itoa(orderID)))
if err != nil {
fmt.Printf("Failed to persist state: %v\n", err)
break
}
time.Sleep(1 * time.Second)
}
}
}
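// Illustrative runs (assume a Dapr sidecar is already up, DAPR_GRPC_PORT points at its
// gRPC port, and a state store component named "statestore" is configured):
//
//	go run order.go put --id 7
//	go run order.go get
//	go run order.go del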
| ["\"DAPR_GRPC_PORT\""] | [] | ["DAPR_GRPC_PORT"] | [] | ["DAPR_GRPC_PORT"] | go | 1 | 0 | |
src/blade/blade_util.py | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
# Feng chen <[email protected]>
# Yi Wang <[email protected]>
# Chong peng <[email protected]>
# Date: October 20, 2011
"""
This is the util module which provides command functions.
"""
import fcntl
import os
import string
import subprocess
import console
try:
import hashlib as md5
except ImportError:
import md5
def md5sum_str(user_str):
"""md5sum of basestring. """
m = md5.md5()
if not isinstance(user_str, basestring):
        console.error_exit('not a valid basestring type to calculate md5')
m.update(user_str)
return m.hexdigest()
def md5sum(obj):
    """Calculate md5sum and return it. """
return md5sum_str(obj)
def lock_file(fd, flags):
"""lock file. """
try:
fcntl.flock(fd, flags)
return (True, 0)
except IOError, ex_value:
return (False, ex_value[0])
def unlock_file(fd):
"""unlock file. """
try:
fcntl.flock(fd, fcntl.LOCK_UN)
return (True, 0)
except IOError, ex_value:
return (False, ex_value[0])
def var_to_list(var):
"""change the var to be a list. """
if isinstance(var, list):
return var
if not var:
return []
return [var]
def relative_path(a_path, reference_path):
"""_relative_path.
Get the relative path of a_path by considering reference_path as the
root directory. For example, if
reference_path = '/src/paralgo'
a_path = '/src/paralgo/mapreduce_lite/sorted_buffer'
then
_relative_path(a_path, reference_path) = 'mapreduce_lite/sorted_buffer'
"""
if not a_path:
raise ValueError('no path specified')
# Count the number of segments shared by reference_path and a_path.
reference_list = os.path.abspath(reference_path).split(os.path.sep)
path_list = os.path.abspath(a_path).split(os.path.sep)
i = 0
for i in range(min(len(reference_list), len(path_list))):
# TODO(yiwang): Why use lower here?
if reference_list[i].lower() != path_list[i].lower():
break
else:
# TODO(yiwnag): Why do not move i+=1 out from the loop?
i += 1
rel_list = [os.path.pardir] * (len(reference_list) - i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def get_cwd():
"""get_cwd
os.getcwd() doesn't work because it will follow symbol link.
os.environ.get('PWD') doesn't work because it won't reflect os.chdir().
So in practice we simply use system('pwd') to get current working directory.
"""
p = subprocess.Popen(['pwd'], stdout=subprocess.PIPE, shell=True)
return p.communicate()[0].strip()
def environ_add_path(env, key, path):
    """Add path to PATH-like environment variables, such as PATH, LD_LIBRARY_PATH, etc."""
old = env.get(key)
if old:
env[key] = old + ':' + path
else:
env[key] = path
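# Illustrative usage (hypothetical paths): extend the library search path in a copy of
# the current environment before spawning a subprocess.
#   env = dict(os.environ)
#   environ_add_path(env, 'LD_LIBRARY_PATH', '/opt/mylib/lib')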
def cpu_count():
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError:
return int(os.sysconf('SC_NPROCESSORS_ONLN'))
def regular_variable_name(var):
"""regular_variable_name.
Parameters
-----------
var: the variable to be modified
Returns
-----------
s: the variable modified
Description
-----------
Replace the chars that scons doesn't regconize.
"""
return var.translate(string.maketrans(',-/.+*', '______'))
| [] | [] | ["PWD"] | [] | ["PWD"] | python | 1 | 0 | |
django/core/servers/basehttp.py | """
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import stat
import sys
import urllib
from django.utils.http import http_date
from django.utils._os import safe_join
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower()<>name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
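# Minimal usage sketch for the Headers class above (illustrative header values;
# not part of the original module):
#
#   h = Headers([('Content-Type', 'text/plain')])
#   h['X-Example'] = 'demo'
#   h.add_header('Content-Disposition', 'attachment', filename='bud.gif')
#   str(h)   # the formatted header block, terminated by a blank line, ready to send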
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
                chunk_size = min(33554432, length - offset)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Pure abstract methods; *must* be overridden in subclasses
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException, e
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format % args))
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from django.conf import settings
self.application = application
if not media_dir:
import django
self.media_dir = \
os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ADMIN_MEDIA_PREFIX.
relative_url = url[len(self.media_url):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.media_dir, relative_path)
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
try:
file_path = self.file_path(environ['PATH_INFO'])
except ValueError: # Resulting file path was not valid.
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
start_response(status, headers.items())
return output
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
else:
try:
fp = open(file_path, 'rb')
except IOError:
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % environ['PATH_INFO']]
else:
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first
# request (assuming the browser/client supports conditional
# GET).
mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
headers = {'Last-Modified': mtime}
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 NOT MODIFIED'
output = []
else:
status = '200 OK'
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type:
headers['Content-Type'] = mime_type
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
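# Illustrative sketch of starting this development server by hand (the address and
# port are made-up values; Django's runserver management command normally wraps
# this call for you):
#
#   from django.core.handlers.wsgi import WSGIHandler
#   run('127.0.0.1', 8000, AdminMediaHandler(WSGIHandler()))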
| []
| []
| []
| [] | [] | python | 0 | 0 | |
testutil/request/request.go | package request // import "github.com/docker/docker/testutil/request"
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/testutil/environment"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
)
// NewAPIClient returns a docker API client configured from environment variables
func NewAPIClient(t testing.TB, ops ...client.Opt) client.APIClient {
t.Helper()
ops = append([]client.Opt{client.FromEnv}, ops...)
clt, err := client.NewClientWithOpts(ops...)
assert.NilError(t, err)
return clt
}
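// exampleAPIClientUsage is an illustrative sketch only (a hypothetical helper, not
// used by the test suite): it shows how a test typically obtains a client from the
// environment and queries the daemon.
func exampleAPIClientUsage(t *testing.T) {
	ctx := context.Background()
	apiClient := NewAPIClient(t)
	info, err := apiClient.Info(ctx)
	assert.NilError(t, err)
	t.Logf("daemon ID: %s", info.ID)
}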
// DaemonTime provides the current time on the daemon host
func DaemonTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) time.Time {
t.Helper()
if testEnv.IsLocalDaemon() {
return time.Now()
}
info, err := client.Info(ctx)
assert.NilError(t, err)
dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
assert.NilError(t, err, "invalid time format in GET /info response")
return dt
}
// DaemonUnixTime returns the current time on the daemon host with nanosecond precision.
// It returns the time formatted the way the client sends timestamps to the server.
func DaemonUnixTime(ctx context.Context, t testing.TB, client client.APIClient, testEnv *environment.Execution) string {
t.Helper()
dt := DaemonTime(ctx, t, client, testEnv)
return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond()))
}
// Post creates and executes a POST request on the specified host and endpoint, with the specified request modifiers
func Post(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodPost))...)
}
// Delete creates and executes a DELETE request on the specified host and endpoint, with the specified request modifiers
func Delete(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodDelete))...)
}
// Get creates and executes a GET request on the specified host and endpoint, with the specified request modifiers
func Get(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, modifiers...)
}
// Head creates and executes a HEAD request on the specified host and endpoint, with the specified request modifiers
func Head(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodHead))...)
}
// Do creates and executes a request on the specified endpoint, with the specified request modifiers
func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) {
opts := &Options{
host: DaemonHost(),
}
for _, mod := range modifiers {
mod(opts)
}
req, err := newRequest(endpoint, opts)
if err != nil {
return nil, nil, err
}
client, err := newHTTPClient(opts.host)
if err != nil {
return nil, nil, err
}
resp, err := client.Do(req)
var body io.ReadCloser
if resp != nil {
body = ioutils.NewReadCloserWrapper(resp.Body, func() error {
defer resp.Body.Close()
return nil
})
}
return resp, body, err
}
// ReadBody reads the specified ReadCloser content and returns it
func ReadBody(b io.ReadCloser) ([]byte, error) {
defer b.Close()
return ioutil.ReadAll(b)
}
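// exampleInfoRequest is an illustrative sketch only (a hypothetical helper, not used
// by the test suite): it shows the intended flow of issuing a request against the
// daemon under test and reading the response body.
func exampleInfoRequest() ([]byte, error) {
	res, body, err := Get("/info")
	if err != nil {
		return nil, err
	}
	if res.StatusCode != http.StatusOK {
		return nil, errors.Errorf("unexpected status code %d", res.StatusCode)
	}
	return ReadBody(body)
}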
// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers
func newRequest(endpoint string, opts *Options) (*http.Request, error) {
hostURL, err := client.ParseHostURL(opts.host)
if err != nil {
return nil, errors.Wrapf(err, "failed parsing url %q", opts.host)
}
req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to create request")
}
if os.Getenv("DOCKER_TLS_VERIFY") != "" {
req.URL.Scheme = "https"
} else {
req.URL.Scheme = "http"
}
req.URL.Host = hostURL.Host
for _, config := range opts.requestModifiers {
if err := config(req); err != nil {
return nil, err
}
}
return req, nil
}
// newHTTPClient creates an http client for the specific host
// TODO: Share more code with client.defaultHTTPClient
func newHTTPClient(host string) (*http.Client, error) {
// FIXME(vdemeester) 10*time.Second timeout of SockRequest… ?
hostURL, err := client.ParseHostURL(host)
if err != nil {
return nil, err
}
transport := new(http.Transport)
if hostURL.Scheme == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
transport = &http.Transport{TLSClientConfig: tlsConfig}
}
transport.DisableKeepAlives = true
err = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
return &http.Client{Transport: transport}, err
}
func getTLSConfig() (*tls.Config, error) {
dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
}
option := &tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
return tlsConfig, nil
}
// DaemonHost returns the daemon host string for this test execution
func DaemonHost() string {
daemonURLStr := client.DefaultDockerHost
if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
daemonURLStr = daemonHostVar
}
return daemonURLStr
}
// SockConn opens a connection on the specified socket
func SockConn(timeout time.Duration, daemon string) (net.Conn, error) {
daemonURL, err := url.Parse(daemon)
if err != nil {
return nil, errors.Wrapf(err, "could not parse url %q", daemon)
}
var c net.Conn
switch daemonURL.Scheme {
case "npipe":
return npipeDial(daemonURL.Path, timeout)
case "unix":
return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout)
case "tcp":
if os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
dialer := &net.Dialer{Timeout: timeout}
return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig)
}
return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout)
default:
return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
}
}
| [
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_HOST\"",
"\"DOCKER_TLS_VERIFY\""
]
| []
| [
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
]
| [] | ["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"] | go | 3 | 0 | |
libweasyl/libweasyl/conftest.py | import os
import pytest
import sqlalchemy as sa
from libweasyl.configuration import configure_libweasyl
from libweasyl.models.meta import registry
from libweasyl.models.tables import metadata
from libweasyl.test.common import NotFound
from libweasyl.test.common import media_link_formatter
from libweasyl import cache
engine = sa.create_engine(os.environ.get('WEASYL_TEST_SQLALCHEMY_URL', 'postgresql+psycopg2cffi:///weasyl_test'))
sessionmaker = sa.orm.scoped_session(sa.orm.sessionmaker(bind=engine))
@pytest.fixture(scope='session', autouse=True)
def setup(request):
db = sessionmaker()
db.execute('DROP SCHEMA public CASCADE')
db.execute('CREATE SCHEMA public')
db.execute('CREATE EXTENSION HSTORE')
db.commit()
metadata.create_all(engine)
cache.region.configure('dogpile.cache.memory')
@pytest.fixture(autouse=True)
def staticdir(tmpdir):
tmpdir = tmpdir.join('libweasyl-staticdir')
configure_libweasyl(
dbsession=sessionmaker,
not_found_exception=NotFound,
base_file_path=tmpdir.strpath,
staff_config_dict={},
media_link_formatter_callback=media_link_formatter.format_media_link,
)
return tmpdir
@pytest.fixture
def db(request):
db = sessionmaker()
# If a previous test has failed due to an SQL problem, the session will be
# in a broken state, requiring a rollback. It's not harmful to
# unconditionally rollback, so just do that.
db.rollback()
def tear_down():
"Clears all rows from the test database."
for k, cls in registry.items():
if not k[0].isupper():
continue
db.query(cls).delete()
db.flush()
db.commit()
request.addfinalizer(tear_down)
return db
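# Illustrative sketch of a test consuming the fixtures above (the model and field
# names are hypothetical): the `db` fixture hands the test a session bound to the
# test database and wipes every table again once the test finishes.
#
#   def test_create_row(db):
#       db.add(SomeModel(name='example'))
#       db.flush()
#       assert db.query(SomeModel).count() == 1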
| []
| []
| [
"WEASYL_TEST_SQLALCHEMY_URL"
]
| [] | ["WEASYL_TEST_SQLALCHEMY_URL"] | python | 1 | 0 | |
core/src/main/java/uk/dansiviter/gcp/ResourceType.java | /*
* Copyright 2019-2021 Daniel Siviter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.dansiviter.gcp;
import static java.lang.System.getenv;
import static java.util.Objects.isNull;
import static java.util.Objects.requireNonNull;
import static uk.dansiviter.gcp.AtomicInit.atomic;
import static uk.dansiviter.gcp.ResourceType.Label.CLUSTER_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.CONTAINER_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.INSTANCE_ID;
import static uk.dansiviter.gcp.ResourceType.Label.LOCATION;
import static uk.dansiviter.gcp.ResourceType.Label.MODULE_ID;
import static uk.dansiviter.gcp.ResourceType.Label.NAMESPACE_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.POD_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.PROJECT_ID;
import static uk.dansiviter.gcp.ResourceType.Label.REVISION_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.SERVICE_NAME;
import static uk.dansiviter.gcp.ResourceType.Label.VERSION_ID;
import static uk.dansiviter.gcp.ResourceType.Label.ZONE;
import java.util.Arrays;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import com.google.cloud.MetadataConfig;
import com.google.cloud.MonitoredResource;
import com.google.cloud.ServiceOptions;
/**
* Utility to create a {@link MonitoredResource} based on the Cloud Operations Suite documentation. It will attempt to
* load the data from the environment but all values can be overridden via System properties (prefix
* {@code gcp.cloud.resource.}) or environment parameter (in uppercase). This is inspired by
* {@code com.google.cloud.logging.MonitoredResourceUtil} but more flexible.
*
* @author Daniel Siviter
* @since v1.0 [6 Dec 2019]
* @see <a href="https://cloud.google.com/monitoring">Cloud Monitoring</a>
* @see <a href="https://cloud.google.com/logging">Cloud Logging</a>
*/
public enum ResourceType {
/**
* A virtual machine instance hosted in Google Compute Engine (GCE).
*/
GCE_INSTANCE("gce_instance", PROJECT_ID, INSTANCE_ID, ZONE),
/**
* An application running in Google App Engine (GAE).
*/
GAE_APP("gae_app", PROJECT_ID, MODULE_ID, VERSION_ID),
/**
* An application running in Google App Engine (GAE) Flex.
*/
GAE_APP_FLEX("gae_app_flex", PROJECT_ID, MODULE_ID, VERSION_ID, ZONE),
/**
* A Kubernetes container instance.
* <p>
* This has replaced 'container' for logs and 'gke_container' for metrics:
* https://cloud.google.com/monitoring/kubernetes-engine/migration#what-is-changing
*/
K8S_CONTAINER("k8s_container", PROJECT_ID, LOCATION, CLUSTER_NAME, NAMESPACE_NAME, POD_NAME, CONTAINER_NAME),
/**
* A revision in Cloud Run (fully managed).
*/
CLOUD_RUN("cloud_run_revision", REVISION_NAME, SERVICE_NAME, LOCATION),
/**
* A resource type that is not associated with any specific resource.
*/
GLOBAL("global", PROJECT_ID);
private final String name;
private final Label[] labels;
private ResourceType(String name, Label... labels) {
this.name = requireNonNull(name);
this.labels = labels;
}
/**
* @return the monitored resource instance for this resource type.
*/
public MonitoredResource toMonitoredResource() {
return monitoredResource(this);
}
/**
* @param override ability to override the default values.
* @return the monitored resource instance for this resource type.
*/
public MonitoredResource toMonitoredResource(Function<String, Optional<String>> override) {
return monitoredResource(this, override);
}
// --- Static Methods ---
/**
* @param name name of resource type.
* @return the found resource.
* @throws IllegalArgumentException if resource not found.
*/
public static ResourceType fromString(String name) {
for (var type : values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
return valueOf(name);
}
/**
* Attempts to auto-detect resource type.
*
* @return the resource type.
*/
public static Optional<ResourceType> autoDetect() {
if (getenv("K_SERVICE") != null
&& getenv("K_REVISION") != null
&& getenv("K_CONFIGURATION") != null
&& getenv("KUBERNETES_SERVICE_HOST") == null)
{
return Optional.of(CLOUD_RUN);
}
if (System.getenv("GAE_INSTANCE") != null) {
return Optional.of(GAE_APP_FLEX);
}
if (System.getenv("KUBERNETES_SERVICE_HOST") != null) {
return Optional.of(K8S_CONTAINER);
}
if (ServiceOptions.getAppEngineAppId() != null) {
return Optional.of(GAE_APP);
}
if (MetadataConfig.getInstanceId() != null) {
return Optional.of(GCE_INSTANCE);
}
return Optional.empty();
}
/**
* @return the created monitored instance.
*/
public static MonitoredResource monitoredResource() {
return monitoredResource(n -> Optional.empty());
}
/**
* @param override ability to override the default values.
* @return the created monitored instance.
*/
public static MonitoredResource monitoredResource(Function<String, Optional<String>> override) {
return monitoredResource(autoDetect().orElse(GLOBAL), override);
}
/**
* @param type the resource type;
* @return the created monitored instance.
*/
public static MonitoredResource monitoredResource(ResourceType type) {
return monitoredResource(type, n -> Optional.empty());
}
/**
* @param type the resource type;
* @param override ability to override the default values.
* @return the created monitored instance.
*/
public static MonitoredResource monitoredResource(ResourceType type, Function<String, Optional<String>> override) {
var builder = MonitoredResource.newBuilder(type.name);
Arrays.asList(type.labels).forEach(l -> {
var value = override.apply(l.name);
value.ifPresentOrElse(
v -> builder.addLabel(l.name, v),
() -> l.get().ifPresent(v -> builder.addLabel(l.name, v)));
});
return builder.build();
}
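	// Illustrative sketch (not part of this class; the zone value is made up). Typical
	// call sites either rely on auto-detection or force a type and override a label:
	//
	//   MonitoredResource detected = ResourceType.monitoredResource();
	//   MonitoredResource gce = ResourceType.monitoredResource(
	//       ResourceType.GCE_INSTANCE,
	//       name -> "zone".equals(name) ? Optional.of("europe-west1-b") : Optional.empty());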
/**
* Extracts the value for the monitored resource label.
*
* @param resource the resource to use.
* @param key the key of the label.
* @return the value.
*/
public static Optional<String> label(MonitoredResource resource, Label key) {
return Optional.ofNullable(resource.getLabels().get(key.name));
}
/**
* Extracts the {@code project_id}.
*
* @param resource the resource to use.
* @return the value.
*/
public static Optional<String> projectId(MonitoredResource resource) {
return Label.PROJECT_ID.get(resource);
}
// --- Inner Classes ---
/**
* @see <a href="https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#gke_mds">GKE MDS</a>
*/
public enum Label {
/**
* The identifier of the GCP project associated with this resource, such as
* "my-project".
*/
PROJECT_ID("project_id", true, ServiceOptions::getDefaultProjectId),
/**
* The numeric VM instance identifier assigned by Compute Engine.
*/
INSTANCE_ID("instance_id", MetadataConfig::getInstanceId),
/**
* The Compute Engine zone in which the VM is running.
*/
ZONE("zone", MetadataConfig::getZone),
/**
* The service/module name.
*/
MODULE_ID("module_id", () -> getenv("GAE_SERVICE")),
/**
* The version name.
*/
VERSION_ID("version_id", () -> getenv("GAE_VERSION")),
/**
* The physical location of the cluster that contains the container.
* <p>
* This relates to the master node rather than the pod.
* https://cloud.google.com/monitoring/kubernetes-engine/migration#resource_type_changes
*/
LOCATION("location", Label::getLocation),
/**
* The name of the cluster that the container is running in.
*/
CLUSTER_NAME("cluster_name", MetadataConfig::getClusterName),
/**
* The name of the namespace that the container is running in.
*/
NAMESPACE_NAME("namespace_name", MetadataConfig::getNamespaceId),
/**
* The name of the pod that the container is running in.
*/
POD_NAME("pod_name", () -> getenv("HOSTNAME")),
/**
* The name of the container.
*/
CONTAINER_NAME("container_name", MetadataConfig::getContainerName),
/**
* Cloud Run/Knative revision.
*/
REVISION_NAME("revision_name", () -> getenv("K_REVISION")),
/**
* K8s/Cloud Run/Knative service name.
*/
SERVICE_NAME("service_name", () -> getenv("K_SERVICE"));
private final String name;
private final Supplier<Optional<String>> supplier;
private Label(String name, Supplier<String> supplier) {
this(name, false, supplier);
}
private Label(String name, boolean cache, Supplier<String> supplier) {
this.name = requireNonNull(name);
Supplier<Optional<String>> delegate = () -> get(name, supplier);
this.supplier = cache ? atomic(delegate) : delegate;
}
public Optional<String> get() {
return this.supplier.get();
}
/**
* @return the value of the label.
*/
public static Optional<String> get(String name, Supplier<String> supplier) {
var value = Optional.ofNullable(getenv(name.toUpperCase()));
if (value.isPresent()) {
return value;
}
value = Optional.ofNullable(System.getProperty("gcp.cloud.resource.".concat(name)));
if (value.isPresent()) {
return value;
}
var strValue = supplier.get();
if (!isNull(strValue) && !strValue.isEmpty()) {
return Optional.of(strValue);
}
return Optional.empty();
}
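		// Rough sketch of the lookup order implemented above (values are illustrative):
		// an explicitly-set environment variable wins, then the system property, then
		// the metadata/supplier fallback. For the ZONE label, for example:
		//
		//   export ZONE=europe-west1-b             -> Optional.of("europe-west1-b")
		//   -Dgcp.cloud.resource.zone=us-east1-b   -> Optional.of("us-east1-b")
		//   neither set                            -> MetadataConfig.getZone()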
/**
* @param resource the resource to extract from.
* @return the value.
*/
public Optional<String> get(MonitoredResource resource) {
return ResourceType.label(resource, this);
}
private static String getLocation() {
var zone = MetadataConfig.getZone();
if (zone != null && zone.endsWith("-1")) {
return zone.substring(0, zone.length() - 2);
}
return zone;
}
}
}
| [
"\"GAE_INSTANCE\"",
"\"KUBERNETES_SERVICE_HOST\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"GAE_INSTANCE"
]
| [] | ["KUBERNETES_SERVICE_HOST", "GAE_INSTANCE"] | java | 2 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/AbstractCSharpCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import com.google.common.collect.ImmutableMap;
import com.samskivert.mustache.Mustache.Lambda;
import io.swagger.v3.core.util.Json;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.templating.mustache.*;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import static org.openapitools.codegen.utils.StringUtils.camelize;
public abstract class AbstractCSharpCodegen extends DefaultCodegen implements CodegenConfig {
protected boolean optionalAssemblyInfoFlag = true;
protected boolean optionalEmitDefaultValuesFlag = false;
protected boolean conditionalSerialization = false;
protected boolean optionalProjectFileFlag = true;
protected boolean optionalMethodArgumentFlag = true;
protected boolean useDateTimeOffsetFlag = false;
protected boolean useCollection = false;
protected boolean returnICollection = false;
protected boolean netCoreProjectFileFlag = false;
protected boolean nullReferenceTypesFlag = false;
protected String modelPropertyNaming = CodegenConstants.MODEL_PROPERTY_NAMING_TYPE.PascalCase.name();
protected String licenseUrl = "http://localhost";
protected String licenseName = "NoLicense";
protected String packageVersion = "1.0.0";
protected String packageName = "Org.OpenAPITools";
protected String packageTitle = "OpenAPI Library";
protected String packageProductName = "OpenAPILibrary";
    protected String packageDescription = "A library generated from an OpenAPI doc";
protected String packageCompany = "OpenAPI";
protected String packageCopyright = "No Copyright";
protected String packageAuthors = "OpenAPI";
protected String interfacePrefix = "I";
protected String enumNameSuffix = "Enum";
protected String enumValueSuffix = "Enum";
protected String sourceFolder = "src";
// TODO: Add option for test folder output location. Nice to allow e.g. ./test instead of ./src.
// This would require updating relative paths (e.g. path to main project file in test project file)
protected String testFolder = sourceFolder;
protected Set<String> collectionTypes;
protected Set<String> mapTypes;
// true if support nullable type
protected boolean supportNullable = Boolean.FALSE;
// nullable type
protected Set<String> nullableType = new HashSet<String>();
protected Set<String> valueTypes = new HashSet<String>();
private final Logger LOGGER = LoggerFactory.getLogger(AbstractCSharpCodegen.class);
// special property keywords not allowed as these are the function names in the model files
protected Set<String> propertySpecialKeywords = new HashSet<>(Arrays.asList("ToString", "ToJson", "GetHashCode", "Equals", "ShouldSerializeToString"));
// A cache to efficiently lookup schema `toModelName()` based on the schema Key
private Map<String, String> schemaKeyToModelNameCache = new HashMap<>();
public AbstractCSharpCodegen() {
super();
supportsInheritance = true;
// C# does not use import mapping
importMapping.clear();
outputFolder = "generated-code" + File.separator + this.getName();
embeddedTemplateDir = templateDir = this.getName();
collectionTypes = new HashSet<String>(
Arrays.asList(
"IList", "List",
"ICollection", "Collection",
"IEnumerable")
);
mapTypes = new HashSet<String>(
Arrays.asList("IDictionary")
);
// NOTE: C# uses camel cased reserved words, while models are title cased. We don't want lowercase comparisons.
reservedWords.addAll(
Arrays.asList(
// set "client" as a reserved word to avoid conflicts with Org.OpenAPITools.Client
// this is a workaround and can be removed if c# api client is updated to use
// fully qualified name
"Client", "client", "parameter", "Configuration", "Version",
// local variable names in API methods (endpoints)
"localVarPath", "localVarPathParams", "localVarQueryParams", "localVarHeaderParams",
"localVarFormParams", "localVarFileParams", "localVarStatusCode", "localVarResponse",
"localVarPostBody", "localVarHttpHeaderAccepts", "localVarHttpHeaderAccept",
"localVarHttpContentTypes", "localVarHttpContentType",
"localVarStatusCode",
// C# reserved words
"abstract", "as", "base", "bool", "break", "byte", "case", "catch", "char", "checked",
"class", "const", "continue", "decimal", "default", "delegate", "do", "double", "else",
"enum", "event", "explicit", "extern", "false", "finally", "fixed", "float", "for",
"foreach", "goto", "if", "implicit", "in", "int", "interface", "internal", "is", "lock",
"long", "namespace", "new", "null", "object", "operator", "out", "override", "params",
"private", "protected", "public", "readonly", "ref", "return", "sbyte", "sealed",
"short", "sizeof", "stackalloc", "static", "string", "struct", "switch", "this", "throw",
"true", "try", "typeof", "uint", "ulong", "unchecked", "unsafe", "ushort", "using",
"virtual", "void", "volatile", "while")
);
// TODO: Either include fully qualified names here or handle in DefaultCodegen via lastIndexOf(".") search
languageSpecificPrimitives = new HashSet<String>(
Arrays.asList(
"String",
"string",
"bool?",
"bool",
"double?",
"double",
"decimal?",
"decimal",
"int?",
"int",
"long?",
"long",
"float?",
"float",
"byte[]",
"ICollection",
"Collection",
"List",
"Dictionary",
"DateTime?",
"DateTime",
"DateTimeOffset?",
"DateTimeOffset",
"Boolean",
"Double",
"Decimal",
"Int32",
"Int64",
"Float",
"Guid?",
"Guid",
"System.IO.Stream", // not really a primitive, we include it to avoid model import
"Object")
);
instantiationTypes.put("array", "List");
instantiationTypes.put("list", "List");
instantiationTypes.put("map", "Dictionary");
// Nullable types here assume C# 2 support is not part of base
typeMapping = new HashMap<String, String>();
typeMapping.put("string", "string");
typeMapping.put("binary", "byte[]");
typeMapping.put("ByteArray", "byte[]");
typeMapping.put("boolean", "bool?");
typeMapping.put("integer", "int?");
typeMapping.put("float", "float?");
typeMapping.put("long", "long?");
typeMapping.put("double", "double?");
typeMapping.put("number", "decimal?");
typeMapping.put("BigDecimal", "decimal?");
typeMapping.put("DateTime", "DateTime?");
typeMapping.put("date", "DateTime?");
typeMapping.put("file", "System.IO.Stream");
typeMapping.put("array", "List");
typeMapping.put("list", "List");
typeMapping.put("map", "Dictionary");
typeMapping.put("object", "Object");
typeMapping.put("UUID", "Guid?");
typeMapping.put("URI", "string");
typeMapping.put("AnyType", "Object");
// nullable type
nullableType = new HashSet<String>(
Arrays.asList("decimal", "bool", "int", "float", "long", "double", "DateTime", "DateTimeOffset", "Guid")
);
// value Types
valueTypes = new HashSet<String>(
Arrays.asList("decimal", "bool", "int", "float", "long", "double")
);
}
public void setReturnICollection(boolean returnICollection) {
this.returnICollection = returnICollection;
}
public void setUseCollection(boolean useCollection) {
this.useCollection = useCollection;
if (useCollection) {
typeMapping.put("array", "Collection");
typeMapping.put("list", "Collection");
instantiationTypes.put("array", "Collection");
instantiationTypes.put("list", "Collection");
}
}
public void setOptionalMethodArgumentFlag(boolean flag) {
this.optionalMethodArgumentFlag = flag;
}
public void setNetCoreProjectFileFlag(boolean flag) {
this.netCoreProjectFileFlag = flag;
}
public void useDateTimeOffset(boolean flag) {
this.useDateTimeOffsetFlag = flag;
if (flag) {
typeMapping.put("DateTime", "DateTimeOffset");
} else {
typeMapping.put("DateTime", "DateTime");
}
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("CSHARP_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable CSHARP_POST_PROCESS_FILE not defined so the C# code may not be properly formatted by uncrustify (0.66 or later) or other code formatter. To define it, try `export CSHARP_POST_PROCESS_FILE=\"/usr/local/bin/uncrustify --no-backup\" && export UNCRUSTIFY_CONFIG=/path/to/uncrustify-rules.cfg` (Linux/Mac). Note: replace /path/to with the location of uncrustify-rules.cfg");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
// License info
if (additionalProperties.containsKey(CodegenConstants.LICENSE_URL)) {
setLicenseUrl((String) additionalProperties.get(CodegenConstants.LICENSE_URL));
} else {
additionalProperties.put(CodegenConstants.LICENSE_URL, this.licenseUrl);
}
if (additionalProperties.containsKey(CodegenConstants.LICENSE_NAME)) {
setLicenseName((String) additionalProperties.get(CodegenConstants.LICENSE_NAME));
} else {
additionalProperties.put(CodegenConstants.LICENSE_NAME, this.licenseName);
}
// {{packageVersion}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_VERSION)) {
setPackageVersion((String) additionalProperties.get(CodegenConstants.PACKAGE_VERSION));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_VERSION, packageVersion);
}
// {{sourceFolder}}
if (additionalProperties.containsKey(CodegenConstants.SOURCE_FOLDER)) {
setSourceFolder((String) additionalProperties.get(CodegenConstants.SOURCE_FOLDER));
} else {
additionalProperties.put(CodegenConstants.SOURCE_FOLDER, this.sourceFolder);
}
// {{packageName}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_NAME)) {
setPackageName((String) additionalProperties.get(CodegenConstants.PACKAGE_NAME));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_NAME, packageName);
}
if (additionalProperties.containsKey(CodegenConstants.INVOKER_PACKAGE)) {
LOGGER.warn(String.format(Locale.ROOT, "%s is not used by C# generators. Please use %s",
CodegenConstants.INVOKER_PACKAGE, CodegenConstants.PACKAGE_NAME));
}
// {{packageTitle}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_TITLE)) {
setPackageTitle((String) additionalProperties.get(CodegenConstants.PACKAGE_TITLE));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_TITLE, packageTitle);
}
// {{packageProductName}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_PRODUCTNAME)) {
setPackageProductName((String) additionalProperties.get(CodegenConstants.PACKAGE_PRODUCTNAME));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_PRODUCTNAME, packageProductName);
}
// {{packageDescription}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_DESCRIPTION)) {
setPackageDescription((String) additionalProperties.get(CodegenConstants.PACKAGE_DESCRIPTION));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_DESCRIPTION, packageDescription);
}
// {{packageCompany}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_COMPANY)) {
setPackageCompany((String) additionalProperties.get(CodegenConstants.PACKAGE_COMPANY));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_COMPANY, packageCompany);
}
// {{packageCopyright}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_COPYRIGHT)) {
setPackageCopyright((String) additionalProperties.get(CodegenConstants.PACKAGE_COPYRIGHT));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_COPYRIGHT, packageCopyright);
}
// {{packageAuthors}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_AUTHORS)) {
setPackageAuthors((String) additionalProperties.get(CodegenConstants.PACKAGE_AUTHORS));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_AUTHORS, packageAuthors);
}
// {{useDateTimeOffset}}
if (additionalProperties.containsKey(CodegenConstants.USE_DATETIME_OFFSET)) {
useDateTimeOffset(convertPropertyToBooleanAndWriteBack(CodegenConstants.USE_DATETIME_OFFSET));
} else {
additionalProperties.put(CodegenConstants.USE_DATETIME_OFFSET, useDateTimeOffsetFlag);
}
if (additionalProperties.containsKey(CodegenConstants.USE_COLLECTION)) {
setUseCollection(convertPropertyToBooleanAndWriteBack(CodegenConstants.USE_COLLECTION));
} else {
additionalProperties.put(CodegenConstants.USE_COLLECTION, useCollection);
}
if (additionalProperties.containsKey(CodegenConstants.RETURN_ICOLLECTION)) {
setReturnICollection(convertPropertyToBooleanAndWriteBack(CodegenConstants.RETURN_ICOLLECTION));
} else {
additionalProperties.put(CodegenConstants.RETURN_ICOLLECTION, returnICollection);
}
if (additionalProperties.containsKey(CodegenConstants.NETCORE_PROJECT_FILE)) {
setNetCoreProjectFileFlag(convertPropertyToBooleanAndWriteBack(CodegenConstants.NETCORE_PROJECT_FILE));
} else {
additionalProperties.put(CodegenConstants.NETCORE_PROJECT_FILE, netCoreProjectFileFlag);
}
if (additionalProperties.containsKey(CodegenConstants.NULLABLE_REFERENCE_TYPES)) {
setNullableReferenceTypes(convertPropertyToBooleanAndWriteBack(CodegenConstants.NULLABLE_REFERENCE_TYPES));
} else {
additionalProperties.put(CodegenConstants.NULLABLE_REFERENCE_TYPES, nullReferenceTypesFlag);
}
if (additionalProperties.containsKey(CodegenConstants.INTERFACE_PREFIX)) {
String useInterfacePrefix = additionalProperties.get(CodegenConstants.INTERFACE_PREFIX).toString();
if ("false".equals(useInterfacePrefix.toLowerCase(Locale.ROOT))) {
setInterfacePrefix("");
} else if (!"true".equals(useInterfacePrefix.toLowerCase(Locale.ROOT))) {
// NOTE: if user passes "true" explicitly, we use the default I- prefix. The other supported case here is a custom prefix.
setInterfacePrefix(sanitizeName(useInterfacePrefix));
}
}
if (additionalProperties().containsKey(CodegenConstants.ENUM_NAME_SUFFIX)) {
setEnumNameSuffix(additionalProperties.get(CodegenConstants.ENUM_NAME_SUFFIX).toString());
}
if (additionalProperties().containsKey(CodegenConstants.ENUM_VALUE_SUFFIX)) {
setEnumValueSuffix(additionalProperties.get(CodegenConstants.ENUM_VALUE_SUFFIX).toString());
}
// This either updates additionalProperties with the above fixes, or sets the default if the option was not specified.
additionalProperties.put(CodegenConstants.INTERFACE_PREFIX, interfacePrefix);
}
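    // Illustrative sketch (values are made up): these keys are typically supplied as
    // additional properties when invoking the generator, for example on the CLI:
    //
    //   openapi-generator generate -g csharp -i petstore.yaml \
    //     --additional-properties packageName=My.Client,packageVersion=2.1.0,sourceFolder=src
    //
    // Anything not supplied falls back to the defaults written back into additionalProperties above.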
@Override
protected ImmutableMap.Builder<String, Lambda> addMustacheLambdas() {
return super.addMustacheLambdas()
.put("camelcase_param", new CamelCaseLambda().generator(this).escapeAsParamName(true));
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
List<Object> models = (List<Object>) objs.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
for (CodegenProperty var : cm.vars) {
// check to see if model name is same as the property name
// which will result in compilation error
// if found, prepend with _ to workaround the limitation
if (var.name.equalsIgnoreCase(cm.classname)) {
var.name = "_" + var.name;
}
}
}
// process enum in models
return postProcessModelsEnum(objs);
}
/**
* Invoked by {@link DefaultGenerator} after all models have been post-processed, allowing for a last pass of codegen-specific model cleanup.
*
* @param objs Current state of codegen object model.
* @return An in-place modified state of the codegen object model.
*/
@Override
public Map<String, Object> postProcessAllModels(Map<String, Object> objs) {
final Map<String, Object> processed = super.postProcessAllModels(objs);
postProcessEnumRefs(processed);
updateValueTypeProperty(processed);
updateNullableTypeProperty(processed);
return processed;
}
@Override
protected List<Map<String, Object>> buildEnumVars(List<Object> values, String dataType) {
List<Map<String, Object>> enumVars = super.buildEnumVars(values, dataType);
        // this is needed for enum refs like OuterEnum that are marked as nullable and also have string values
        // keep isString true so that the index will be used as the enum value instead of a string
        // this is in line with C# enums with string values
if ("string?".equals(dataType)){
enumVars.forEach((enumVar) -> {
enumVar.put("isString", true);
});
}
return enumVars;
}
/**
* C# differs from other languages in that Enums are not _true_ objects; enums are compiled to integral types.
     * So, in C#, an enum is considered more like a user-defined primitive.
* <p>
* When working with enums, we can't always assume a RefModel is a nullable type (where default(YourType) == null),
* so this post processing runs through all models to find RefModel'd enums. Then, it runs through all vars and modifies
* those vars referencing RefModel'd enums to work the same as inlined enums rather than as objects.
*
* @param models processed models to be further processed for enum references
*/
@SuppressWarnings({"unchecked"})
private void postProcessEnumRefs(final Map<String, Object> models) {
Map<String, CodegenModel> enumRefs = new HashMap<String, CodegenModel>();
for (Map.Entry<String, Object> entry : models.entrySet()) {
CodegenModel model = ModelUtils.getModelByName(entry.getKey(), models);
if (model.isEnum) {
enumRefs.put(entry.getKey(), model);
}
}
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.allVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.vars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.readWriteVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.readOnlyVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
/* Comment out the following as model.dataType is always the model name, eg. OuterIntegerEnum,
* and this will fix the integer enum via #9035.
* Only x-enum-byte is used in the template but it won't work due to the bug mentioned above.
* A better solution is to introduce isLong, isInteger, etc in the DefaultCodegen
* so that there is no need for each generator to post-process model enums.
*
// We're looping all models here.
if (model.isEnum) {
// We now need to make allowableValues.enumVars look like the context of CodegenProperty
Boolean isString = false;
Boolean isInteger = false;
Boolean isLong = false;
Boolean isByte = false;
if (model.dataType.startsWith("byte")) {
// C# Actually supports byte and short enums, swagger spec only supports byte.
isByte = true;
model.vendorExtensions.put("x-enum-byte", true);
} else if (model.dataType.startsWith("int32")) {
isInteger = true;
model.vendorExtensions.put("x-enum-integer", true);
} else if (model.dataType.startsWith("int64")) {
isLong = true;
model.vendorExtensions.put("x-enum-long", true);
} else {
// C# doesn't support non-integral enums, so we need to treat everything else as strings (e.g. to not lose precision or data integrity)
isString = true;
model.vendorExtensions.put("x-enum-string", true);
}
// Since we iterate enumVars for modelInnerEnum and enumClass templates, and CodegenModel is missing some of CodegenProperty's properties,
// we can take advantage of Mustache's contextual lookup to add the same "properties" to the model's enumVars scope rather than CodegenProperty's scope.
List<Map<String, String>> enumVars = (ArrayList<Map<String, String>>) model.allowableValues.get("enumVars");
List<Map<String, Object>> newEnumVars = new ArrayList<Map<String, Object>>();
for (Map<String, String> enumVar : enumVars) {
Map<String, Object> mixedVars = new HashMap<String, Object>();
mixedVars.putAll(enumVar);
mixedVars.put("isString", isString);
mixedVars.put("isLong", isLong);
mixedVars.put("isInteger", isInteger);
mixedVars.put("isByte", isByte);
newEnumVars.add(mixedVars);
}
if (!newEnumVars.isEmpty()) {
model.allowableValues.put("enumVars", newEnumVars);
}
} */
} else {
LOGGER.warn("Expected to retrieve model %s by name, but no model was found. Check your -Dmodels inclusions.", openAPIName);
}
}
}
/**
* Update codegen property's enum by adding "enumVars" (with name and value)
*
* @param var the CodegenProperty to update
*/
@Override
public void updateCodegenPropertyEnum(CodegenProperty var) {
if (var.vendorExtensions == null) {
var.vendorExtensions = new HashMap<>();
}
super.updateCodegenPropertyEnum(var);
// Because C# uses nullable primitives for datatype, and datatype is used in DefaultCodegen for determining enum-ness, guard against weirdness here.
if (var.isEnum) {
if ("byte".equals(var.dataFormat)) {// C# Actually supports byte and short enums.
var.vendorExtensions.put("x-enum-byte", true);
var.isString = false;
var.isLong = false;
var.isInteger = false;
} else if ("int".equals(var.dataType) || "int32".equals(var.dataFormat)) {
var.isInteger = true;
var.isString = false;
var.isLong = false;
} else if ("int64".equals(var.dataFormat)) {
var.isLong = true;
var.isString = false;
var.isInteger = false;
} else {// C# doesn't support non-integral enums, so we need to treat everything else as strings (e.g. to not lose precision or data integrity)
var.isString = true;
var.isInteger = false;
var.isLong = false;
}
}
}
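// Net effect of the branches above: dataFormat "byte" sets x-enum-byte, dataType "int" or
// dataFormat "int32" sets isInteger, dataFormat "int64" sets isLong, and every other enum
// falls back to isString, since C# enums only support integral underlying types.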
/**
* Update property if it is a C# value type
*
* @param models map of all models
*/
protected void updateValueTypeProperty(Map<String, Object> models) {
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.vars) {
var.vendorExtensions.put("x-is-value-type", isValueType(var));
}
}
}
}
/**
* Update property if it is a C# nullable type
*
* @param models map of all models
*/
protected void updateNullableTypeProperty(Map<String, Object> models) {
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.vars) {
if (!var.isContainer && (nullableType.contains(var.dataType) || var.isEnum)) {
var.vendorExtensions.put("x-csharp-value-type", true);
}
}
}
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
super.postProcessOperationsWithModels(objs, allModels);
if (objs != null) {
Map<String, Object> operations = (Map<String, Object>) objs.get("operations");
if (operations != null) {
List<CodegenOperation> ops = (List<CodegenOperation>) operations.get("operation");
for (CodegenOperation operation : ops) {
// Check return types for collection
if (operation.returnType != null) {
String typeMapping;
int namespaceEnd = operation.returnType.lastIndexOf(".");
if (namespaceEnd > 0) {
typeMapping = operation.returnType.substring(namespaceEnd);
} else {
typeMapping = operation.returnType;
}
if (this.collectionTypes.contains(typeMapping)) {
operation.isArray = true;
operation.returnContainer = operation.returnType;
if (this.returnICollection && (
typeMapping.startsWith("List") ||
typeMapping.startsWith("Collection"))) {
// NOTE: ICollection works for both List<T> and Collection<T>
int genericStart = typeMapping.indexOf("<");
if (genericStart > 0) {
operation.returnType = "ICollection" + typeMapping.substring(genericStart);
}
}
} else {
operation.returnContainer = operation.returnType;
operation.isMap = this.mapTypes.contains(typeMapping);
}
}
// check if the payload is json and set x-is-json accordingly
if (operation.consumes != null) {
for (Map<String, String> consume : operation.consumes) {
if (consume.containsKey("mediaType")) {
if (isJsonMimeType(consume.get("mediaType"))) {
operation.vendorExtensions.put("x-is-json", true);
break;
}
}
}
}
if (operation.examples != null) {
for (Map<String, String> example : operation.examples) {
for (Map.Entry<String, String> entry : example.entrySet()) {
// Replace " with \", \r, \n with \\r, \\n
String val = entry.getValue().replace("\"", "\\\"")
.replace("\r", "\\r")
.replace("\n", "\\n");
entry.setValue(val);
}
}
}
if (!isSupportNullable()) {
for (CodegenParameter parameter : operation.allParams) {
CodegenModel model = null;
for (Object modelHashMap : allModels) {
CodegenModel codegenModel = ((HashMap<String, CodegenModel>) modelHashMap).get("model");
if (codegenModel.getClassname().equals(parameter.dataType)) {
model = codegenModel;
break;
}
}
if (model == null) {
// Primitive data types all come already marked
parameter.isNullable = true;
} else {
// Effectively mark enum models as enums and non-nullable
if (model.isEnum) {
parameter.isEnum = true;
parameter.allowableValues = model.allowableValues;
parameter.isPrimitiveType = true;
parameter.isNullable = false;
} else {
parameter.isNullable = true;
}
}
}
} else {
// Effectively mark enum models as enums
updateCodegenParametersEnum(operation.allParams, allModels);
}
processOperation(operation);
}
}
}
return objs;
}
protected void processOperation(CodegenOperation operation) {
// default noop
}
private void updateCodegenParametersEnum(List<CodegenParameter> parameters, List<Object> allModels) {
for (CodegenParameter parameter : parameters) {
CodegenModel model = null;
for (Object modelHashMap : allModels) {
CodegenModel codegenModel = ((HashMap<String, CodegenModel>) modelHashMap).get("model");
if (codegenModel.getClassname().equals(parameter.dataType)) {
model = codegenModel;
break;
}
}
if (model != null) {
// Effectively mark enum models as enums and non-nullable
if (model.isEnum) {
parameter.isEnum = true;
parameter.allowableValues = model.allowableValues;
parameter.isPrimitiveType = true;
parameter.vendorExtensions.put("x-csharp-value-type", true);
}
}
if (!parameter.isContainer && nullableType.contains(parameter.dataType)) {
parameter.vendorExtensions.put("x-csharp-value-type", true);
}
if (!parameter.required && parameter.vendorExtensions.get("x-csharp-value-type") != null) { //optional
parameter.dataType = parameter.dataType + "?";
}
}
}
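// Net effect: enum-model parameters are flagged as primitive value types, and any optional
// parameter carrying x-csharp-value-type gets a trailing "?" so it maps to a nullable C# type.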
@Override
public String apiFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + packageName + File.separator + apiPackage();
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + packageName + File.separator + modelPackage();
}
@Override
public String toModelFilename(String name) {
// should be the same as the model name
return toModelName(name);
}
@Override
public String toOperationId(String operationId) {
// throw exception if method name is empty (should not occur as an auto-generated method name will be used)
if (StringUtils.isEmpty(operationId)) {
throw new RuntimeException("Empty method name (operationId) not allowed");
}
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
LOGGER.warn("{} (reserved word) cannot be used as method name. Renamed to {}", operationId, camelize(sanitizeName("call_" + operationId)));
operationId = "call_" + operationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn("{} (starting with a number) cannot be used as method name. Renamed to {}", operationId, camelize(sanitizeName("call_" + operationId)));
operationId = "call_" + operationId;
}
return camelize(sanitizeName(operationId));
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name);
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize the variable name
// pet_id => PetId
name = camelize(name);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
if (propertySpecialKeywords.contains(name)) {
return camelize("property_" + name);
}
return name;
}
@Override
public String toParamName(String name) {
// sanitize name
name = sanitizeName(name);
// replace - with _ e.g. created-at => created_at
name = name.replaceAll("-", "_");
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize(lower) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public String escapeReservedWord(String name) {
if (this.reservedWordsMappings().containsKey(name)) {
return this.reservedWordsMappings().get(name);
}
return "_" + name;
}
/**
* Return the example value of the property
*
* @param p OpenAPI property object
* @return string representation of the example value of the property
*/
@Override
public String toExampleValue(Schema p) {
if (ModelUtils.isStringSchema(p)) {
if (p.getExample() != null) {
return "\"" + p.getExample().toString() + "\"";
}
} else if (ModelUtils.isBooleanSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
// TODO
} else if (ModelUtils.isDateTimeSchema(p)) {
// TODO
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
}
return null;
}
/**
* Return the default value of the property
* @param p OpenAPI property object
* @return string representation of the default value of the property
*/
@Override
public String toDefaultValue(Schema p) {
if (ModelUtils.isBooleanSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
if (p.getDefault() != null) {
return "\"" + p.getDefault().toString() + "\"";
}
} else if (ModelUtils.isDateTimeSchema(p)) {
if (p.getDefault() != null) {
return "\"" + p.getDefault().toString() + "\"";
}
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getDefault() != null) {
if (ModelUtils.isFloatSchema(p)) { // float
return p.getDefault().toString() + "F";
} else if (ModelUtils.isDoubleSchema(p)) { // double
return p.getDefault().toString() + "D";
} else { // decimal
return p.getDefault().toString() + "M";
}
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isStringSchema(p)) {
if (p.getDefault() != null) {
String _default = (String) p.getDefault();
if (p.getEnum() == null) {
return "\"" + _default + "\"";
} else {
// convert to enum var name later in postProcessModels
return _default;
}
}
}
return null;
}
@Override
protected boolean isReservedWord(String word) {
// NOTE: This differs from super's implementation in that C# does _not_ want case insensitive matching.
return reservedWords.contains(word);
}
public String getNullableType(Schema p, String type) {
if (languageSpecificPrimitives.contains(type)) {
return type;
} else {
return null;
}
}
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
if (openAPIType == null) {
LOGGER.error("OpenAPI Type for {} is null. Default to UNKNOWN_OPENAPI_TYPE instead.", p.getName());
openAPIType = "UNKNOWN_OPENAPI_TYPE";
}
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
String languageType = getNullableType(p, type);
if (languageType != null) {
return languageType;
}
} else {
type = openAPIType;
}
return toModelName(type);
}
/**
* Provides C# strongly typed declaration for simple arrays of some type and arrays of arrays of some type.
*
* @param arr The input array property
* @return The C# type declaration for the array (including arrays of arrays).
*/
private String getArrayTypeDeclaration(ArraySchema arr) {
// TODO: collection type here should be fully qualified namespace to avoid model conflicts
// This supports arrays of arrays.
String arrayType = typeMapping.get("array");
StringBuilder instantiationType = new StringBuilder(arrayType);
Schema items = arr.getItems();
String nestedType = getTypeDeclaration(items);
// TODO: We may want to differentiate here between generics and primitive arrays.
instantiationType.append("<").append(nestedType).append(">");
return instantiationType.toString();
}
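// Illustration (assuming the usual C# typeMapping where "array" maps to "List"): an array of
// strings becomes "List<string>" and an array of arrays of strings "List<List<string>>".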
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
}
return super.toInstantiationType(p);
}
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
} else if (ModelUtils.isMapSchema(p)) {
// Should we also support maps of maps?
Schema inner = getAdditionalProperties(p);
return getSchemaType(p) + "<string, " + getTypeDeclaration(inner) + ">";
}
return super.getTypeDeclaration(p);
}
@Override
public String toModelName(String name) {
// We need to check if import-mapping has a different model for this class, so we use it
// instead of the auto-generated one.
if (importMapping.containsKey(name)) {
return importMapping.get(name);
}
// memoization
String origName = name;
if (schemaKeyToModelNameCache.containsKey(origName)) {
return schemaKeyToModelNameCache.get(origName);
}
if (!StringUtils.isEmpty(modelNamePrefix)) {
name = modelNamePrefix + "_" + name;
}
if (!StringUtils.isEmpty(modelNameSuffix)) {
name = name + "_" + modelNameSuffix;
}
name = sanitizeName(name);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(name)) {
LOGGER.warn("{} (reserved word) cannot be used as model name. Renamed to {}", name, camelize("model_" + name));
name = "model_" + name; // e.g. return => ModelReturn (after camelize)
}
// model name starts with number
if (name.matches("^\\d.*")) {
LOGGER.warn("{} (model name starts with number) cannot be used as model name. Renamed to {}", name,
camelize("model_" + name));
name = "model_" + name; // e.g. 200Response => Model200Response (after camelize)
}
String camelizedName = camelize(name);
schemaKeyToModelNameCache.put(origName, camelizedName);
// camelize the model name
// phone_number => PhoneNumber
return camelizedName;
}
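// e.g. with modelNamePrefix "My", "phone_number" becomes "MyPhoneNumber"; results are cached in
// schemaKeyToModelNameCache under the original schema key, so repeated lookups are cheap.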
@Override
public String apiTestFileFolder() {
return outputFolder + ".Test";
}
@Override
public String modelTestFileFolder() {
return outputFolder + ".Test";
}
@Override
public String toApiTestFilename(String name) {
return toApiName(name) + "Tests";
}
@Override
public String toModelTestFilename(String name) {
return toModelName(name) + "Tests";
}
public void setLicenseUrl(String licenseUrl) {
this.licenseUrl = licenseUrl;
}
public void setLicenseName(String licenseName) {
this.licenseName = licenseName;
}
public void setPackageName(String packageName) {
this.packageName = packageName;
}
public void setPackageVersion(String packageVersion) {
this.packageVersion = packageVersion;
}
public void setPackageTitle(String packageTitle) {
this.packageTitle = packageTitle;
}
public void setPackageProductName(String packageProductName) {
this.packageProductName = packageProductName;
}
public void setPackageDescription(String packageDescription) {
this.packageDescription = packageDescription;
}
public void setPackageCompany(String packageCompany) {
this.packageCompany = packageCompany;
}
public void setPackageCopyright(String packageCopyright) {
this.packageCopyright = packageCopyright;
}
public void setPackageAuthors(String packageAuthors) {
this.packageAuthors = packageAuthors;
}
public void setSourceFolder(String sourceFolder) {
this.sourceFolder = sourceFolder;
}
public String getInterfacePrefix() {
return interfacePrefix;
}
public void setNullableReferenceTypes(final Boolean nullReferenceTypesFlag){
this.nullReferenceTypesFlag = nullReferenceTypesFlag;
if (Boolean.TRUE.equals(nullReferenceTypesFlag)) {
this.nullableType.add("string");
}
}
public void setInterfacePrefix(final String interfacePrefix) {
this.interfacePrefix = interfacePrefix;
}
public void setEnumNameSuffix(final String enumNameSuffix) {
this.enumNameSuffix = enumNameSuffix;
}
public void setEnumValueSuffix(final String enumValueSuffix) {
this.enumValueSuffix = enumValueSuffix;
}
public boolean isSupportNullable() {
return supportNullable;
}
public void setSupportNullable(final boolean supportNullable) {
this.supportNullable = supportNullable;
}
@Override
public String toEnumValue(String value, String datatype) {
// C# only supports enums as literals for int, int?, long, long?, byte, and byte?. All else must be treated as strings.
// Per: https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/enum
// The approved types for an enum are byte, sbyte, short, ushort, int, uint, long, or ulong.
// but we're not supporting unsigned integral types or shorts.
if (datatype.startsWith("int") || datatype.startsWith("long") || datatype.startsWith("byte")) {
return value;
}
return escapeText(value);
}
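// Example with hypothetical inputs: toEnumValue("3", "int64") returns "3" as a bare literal,
// while toEnumValue("pending", "string") is routed through escapeText() and emitted as text.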
@Override
public String toEnumVarName(String name, String datatype) {
if (name.length() == 0) {
return "Empty";
}
// for symbol, e.g. $, #
if (getSymbolName(name) != null) {
return camelize(getSymbolName(name));
}
String enumName = sanitizeName(name);
enumName = enumName.replaceFirst("^_", "");
enumName = enumName.replaceFirst("_$", "");
enumName = camelize(enumName) + this.enumValueSuffix;
if (enumName.matches("\\d.*")) { // starts with number
return "_" + enumName;
} else {
return enumName;
}
}
@Override
public String toEnumName(CodegenProperty property) {
return sanitizeName(camelize(property.name)) + this.enumNameSuffix;
}
public String testPackageName() {
return this.packageName + ".Test";
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*").replace("--", "- -");
}
@Override
public boolean isDataTypeString(String dataType) {
// also treat double/decimal/float as "string" in enum so that the values (e.g. 2.8) get double-quoted
return "String".equalsIgnoreCase(dataType) ||
"double?".equals(dataType) || "decimal?".equals(dataType) || "float?".equals(dataType) ||
"double".equals(dataType) || "decimal".equals(dataType) || "float".equals(dataType);
}
/**
* Return true if the property being passed is a C# value type
*
* @param var property
* @return true if property is a value type
*/
protected boolean isValueType(CodegenProperty var) {
return (valueTypes.contains(var.dataType) || var.isEnum ) ;
}
@Override
public void setParameterExampleValue(CodegenParameter codegenParameter) {
// set the example value
// if not specified in x-example, generate a default value
// TODO need to revise how to obtain the example value
if (codegenParameter.vendorExtensions != null && codegenParameter.vendorExtensions.containsKey("x-example")) {
codegenParameter.example = Json.pretty(codegenParameter.vendorExtensions.get("x-example"));
} else if (Boolean.TRUE.equals(codegenParameter.isBoolean)) {
codegenParameter.example = "true";
} else if (Boolean.TRUE.equals(codegenParameter.isLong)) {
codegenParameter.example = "789";
} else if (Boolean.TRUE.equals(codegenParameter.isInteger)) {
codegenParameter.example = "56";
} else if (Boolean.TRUE.equals(codegenParameter.isFloat)) {
codegenParameter.example = "3.4F";
} else if (Boolean.TRUE.equals(codegenParameter.isDouble)) {
codegenParameter.example = "1.2D";
} else if (Boolean.TRUE.equals(codegenParameter.isNumber)) {
codegenParameter.example = "8.14";
} else if (Boolean.TRUE.equals(codegenParameter.isBinary)) {
codegenParameter.example = "BINARY_DATA_HERE";
} else if (Boolean.TRUE.equals(codegenParameter.isByteArray)) {
codegenParameter.example = "BYTE_ARRAY_DATA_HERE";
} else if (Boolean.TRUE.equals(codegenParameter.isFile)) {
codegenParameter.example = "/path/to/file.txt";
} else if (Boolean.TRUE.equals(codegenParameter.isDate)) {
codegenParameter.example = "2013-10-20";
} else if (Boolean.TRUE.equals(codegenParameter.isDateTime)) {
codegenParameter.example = "2013-10-20T19:20:30+01:00";
} else if (Boolean.TRUE.equals(codegenParameter.isUuid)) {
codegenParameter.example = "38400000-8cf0-11bd-b23e-10b96e4ef00d";
} else if (Boolean.TRUE.equals(codegenParameter.isUri)) {
codegenParameter.example = "https://openapi-generator.tech";
} else if (Boolean.TRUE.equals(codegenParameter.isString)) {
codegenParameter.example = codegenParameter.paramName + "_example";
}
}
@Override
public void postProcessParameter(CodegenParameter parameter) {
super.postProcessParameter(parameter);
// ensure a method's parameters are marked as nullable when nullable or when nullReferences are enabled
// this is mostly needed for reference types used as a method's parameters
if (!parameter.required && (nullReferenceTypesFlag || nullableType.contains(parameter.dataType))) {
parameter.dataType = parameter.dataType.endsWith("?")
? parameter.dataType
: parameter.dataType + "?";
}
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String csharpPostProcessFile = System.getenv("CSHARP_POST_PROCESS_FILE");
if (StringUtils.isEmpty(csharpPostProcessFile)) {
return; // skip if CSHARP_POST_PROCESS_FILE env variable is not defined
}
// only process files with .cs extension
if ("cs".equals(FilenameUtils.getExtension(file.toString()))) {
String command = csharpPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
int exitValue = p.waitFor();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit code: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: {}", command);
}
} catch (InterruptedException | IOException e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
// Restore interrupted state
Thread.currentThread().interrupt();
}
}
}
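// Usage sketch (the formatter is only an example): export CSHARP_POST_PROCESS_FILE="/usr/local/bin/clang-format -i"
// makes the generator run that command with each generated .cs file appended as the last argument.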
}
| [
"\"CSHARP_POST_PROCESS_FILE\"",
"\"CSHARP_POST_PROCESS_FILE\""
]
| []
| [
"CSHARP_POST_PROCESS_FILE"
]
| [] | ["CSHARP_POST_PROCESS_FILE"] | java | 1 | 0 | |
playwright/_impl/_transport.py | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import io
import json
import os
import subprocess
import sys
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Callable, Dict, Optional, Union
import websockets
import websockets.exceptions
from pyee import AsyncIOEventEmitter
from websockets.client import connect as websocket_connect
from playwright._impl._api_types import Error
from playwright._impl._driver import get_driver_env
from playwright._impl._helper import ParsedMessagePayload
# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77
def _get_stderr_fileno() -> Optional[int]:
try:
# when using pythonw, sys.stderr is None.
# when Pyinstaller is used, there is no closed attribute because Pyinstaller monkey-patches it with a NullWriter class
if sys.stderr is None or not hasattr(sys.stderr, "closed"):
return None
if sys.stderr.closed:
return None
return sys.stderr.fileno()
except (AttributeError, io.UnsupportedOperation):
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
# This is potentially dangerous, but the best we can do.
if not hasattr(sys, "__stderr__") or not sys.__stderr__:
return None
return sys.__stderr__.fileno()
class Transport(ABC):
def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
self._loop = loop
self.on_message: Callable[[ParsedMessagePayload], None] = lambda _: None
self.on_error_future: asyncio.Future = loop.create_future()
@abstractmethod
def request_stop(self) -> None:
pass
def dispose(self) -> None:
pass
@abstractmethod
async def wait_until_stopped(self) -> None:
pass
@abstractmethod
async def connect(self) -> None:
pass
@abstractmethod
async def run(self) -> None:
pass
@abstractmethod
def send(self, message: Dict) -> None:
pass
def serialize_message(self, message: Dict) -> bytes:
msg = json.dumps(message)
if "DEBUGP" in os.environ: # pragma: no cover
print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2))
return msg.encode()
def deserialize_message(self, data: Union[str, bytes]) -> ParsedMessagePayload:
obj = json.loads(data)
if "DEBUGP" in os.environ: # pragma: no cover
print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2))
return obj
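# With DEBUGP set in the environment, both helpers above pretty-print every outgoing
# (SEND>) and incoming (RECV>) message, which is handy when debugging the protocol.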
class PipeTransport(Transport):
def __init__(
self, loop: asyncio.AbstractEventLoop, driver_executable: Path
) -> None:
super().__init__(loop)
self._stopped = False
self._driver_executable = driver_executable
def request_stop(self) -> None:
assert self._output
self._stopped = True
self._output.close()
async def wait_until_stopped(self) -> None:
await self._stopped_future
async def connect(self) -> None:
self._stopped_future: asyncio.Future = asyncio.Future()
# Hide the command-line window on Windows when using Pythonw.exe
creationflags = 0
if sys.platform == "win32" and sys.stdout is None:
creationflags = subprocess.CREATE_NO_WINDOW
try:
# For pyinstaller
env = get_driver_env()
if getattr(sys, "frozen", False):
env.setdefault("PLAYWRIGHT_BROWSERS_PATH", "0")
self._proc = await asyncio.create_subprocess_exec(
str(self._driver_executable),
"run-driver",
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=_get_stderr_fileno(),
limit=32768,
creationflags=creationflags,
env=env,
)
except Exception as exc:
self.on_error_future.set_exception(exc)
raise exc
self._output = self._proc.stdin
async def run(self) -> None:
assert self._proc.stdout
assert self._proc.stdin
while not self._stopped:
try:
buffer = await self._proc.stdout.readexactly(4)
if self._stopped:
break
length = int.from_bytes(buffer, byteorder="little", signed=False)
buffer = bytes(0)
while length:
to_read = min(length, 32768)
data = await self._proc.stdout.readexactly(to_read)
if self._stopped:
break
length -= to_read
if len(buffer):
buffer = buffer + data
else:
buffer = data
if self._stopped:
break
obj = self.deserialize_message(buffer)
self.on_message(obj)
except asyncio.IncompleteReadError:
break
await asyncio.sleep(0)
await self._proc.wait()
self._stopped_future.set_result(None)
def send(self, message: Dict) -> None:
assert self._output
data = self.serialize_message(message)
self._output.write(
len(data).to_bytes(4, byteorder="little", signed=False) + data
)
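# Wire format used by PipeTransport above: a 4-byte little-endian unsigned length prefix
# followed by that many bytes of UTF-8 encoded JSON. A minimal sketch of the same framing
# (illustration only, not used by the transport itself):
#
#   def frame(payload: bytes) -> bytes:
#       return len(payload).to_bytes(4, byteorder="little", signed=False) + payload
#
#   def unframe(buf: bytes) -> bytes:
#       size = int.from_bytes(buf[:4], byteorder="little", signed=False)
#       return buf[4:4 + size]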
class WebSocketTransport(AsyncIOEventEmitter, Transport):
def __init__(
self,
loop: asyncio.AbstractEventLoop,
ws_endpoint: str,
headers: Dict[str, str] = None,
slow_mo: float = None,
) -> None:
super().__init__(loop)
Transport.__init__(self, loop)
self._stopped = False
self.ws_endpoint = ws_endpoint
self.headers = headers
self.slow_mo = slow_mo
def request_stop(self) -> None:
self._stopped = True
self.emit("close")
self._loop.create_task(self._connection.close())
def dispose(self) -> None:
self.on_error_future.cancel()
async def wait_until_stopped(self) -> None:
await self._connection.wait_closed()
async def connect(self) -> None:
try:
self._connection = await websocket_connect(
self.ws_endpoint,
extra_headers=self.headers,
max_size=256 * 1024 * 1024, # 256Mb
)
except Exception as exc:
self.on_error_future.set_exception(Error(f"websocket.connect: {str(exc)}"))
raise exc
async def run(self) -> None:
while not self._stopped:
try:
message = await self._connection.recv()
if self.slow_mo is not None:
await asyncio.sleep(self.slow_mo / 1000)
if self._stopped:
self.on_error_future.set_exception(
Error("Playwright connection closed")
)
break
obj = self.deserialize_message(message)
self.on_message(obj)
except (
websockets.exceptions.ConnectionClosed,
websockets.exceptions.ConnectionClosedError,
):
if not self._stopped:
self.emit("close")
self.on_error_future.set_exception(
Error("Playwright connection closed")
)
break
except Exception as exc:
self.on_error_future.set_exception(exc)
break
def send(self, message: Dict) -> None:
if self._stopped or (hasattr(self, "_connection") and self._connection.closed):
raise Error("Playwright connection closed")
data = self.serialize_message(message)
self._loop.create_task(self._connection.send(data))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
data/test/python/f7181b87434e6a3a078b7f233f6a61d24e5fe9ccbase.py | from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
# monkey patch django.
# This patch makes sure that we use real threads to get the ident, which
# is going to happen if we are using gevent or eventlet.
# -- patch taken from gunicorn
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
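# The patch is applied once at import time; the "called" attribute set inside
# patch_thread_ident() guards against patching twice.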
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ['--app', '--loader', '--config']
keep_base_opts = False
def get_version(self):
return 'celery %s\ndjango-celery %s' % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get('broker')
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ['CELERY_BROKER_URL'] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if '--settings=' in arg:
_, settings_module = arg.split('=')
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
elif '--pythonpath=' in arg:
_, pythonpath = arg.split('=')
sys.path.insert(0, pythonpath)
elif '--broker=' in arg:
_, broker = arg.split('=')
elif arg == '-b':
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
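# Example (hypothetical invocation): passing "--settings=proj.settings --broker=amqp://localhost"
# exports DJANGO_SETTINGS_MODULE and CELERY_BROKER_URL and drops both options from the
# argument list that is returned (unless keep_base_opts is set).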
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
| []
| []
| [
"DJANGO_SETTINGS_MODULE",
"CELERY_BROKER_URL"
]
| [] | ["DJANGO_SETTINGS_MODULE", "CELERY_BROKER_URL"] | python | 2 | 0 | |
docker/docker.go | package main
import (
"crypto/tls"
"fmt"
"os"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/client"
"github.com/docker/docker/autogen/dockerversion"
"github.com/docker/docker/opts"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/docker/utils"
)
const (
defaultTrustKeyFile = "key.json"
defaultCaFile = "ca.pem"
defaultKeyFile = "key.pem"
defaultCertFile = "cert.pem"
)
func main() {
if reexec.Init() {
return
}
// Set terminal emulation based on platform as required.
stdin, stdout, stderr := term.StdStreams()
initLogging(stderr)
flag.Parse()
// FIXME: validate daemon flags here
if *flVersion {
showVersion()
return
}
if *flLogLevel != "" {
lvl, err := logrus.ParseLevel(*flLogLevel)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
os.Exit(1)
}
setLogLevel(lvl)
} else {
setLogLevel(logrus.InfoLevel)
}
if *flDebug {
os.Setenv("DEBUG", "1")
setLogLevel(logrus.DebugLevel)
}
if len(flHosts) == 0 {
defaultHost := os.Getenv("DOCKER_HOST")
if defaultHost == "" || *flDaemon {
if runtime.GOOS != "windows" {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
} else {
// If we do not have a host, default to TCP socket on Windows
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
}
}
defaultHost, err := opts.ValidateHost(defaultHost)
if err != nil {
if *flDaemon {
logrus.Fatal(err)
} else {
fmt.Fprint(os.Stderr, err)
}
os.Exit(1)
}
flHosts = append(flHosts, defaultHost)
}
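// At this point flHosts is guaranteed to hold at least one entry: the -H flags, the
// DOCKER_HOST environment variable, or the platform default socket chosen above.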
setDefaultConfFlag(flTrustKey, defaultTrustKeyFile)
// Regardless of whether the user sets it to true or false, if they
// specify --tlsverify at all then we need to turn on tls
// *flTlsVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well
if flag.IsSet("-tlsverify") || *flTlsVerify {
*flTls = true
}
if *flDaemon {
if *flHelp {
flag.Usage()
return
}
mainDaemon()
return
}
// From here on, we assume we're a client, not a server.
if len(flHosts) > 1 {
fmt.Fprintf(os.Stderr, "Please specify only one -H")
os.Exit(0)
}
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
var tlsConfig *tls.Config
if *flTls {
tlsOptions.InsecureSkipVerify = !*flTlsVerify
if !flag.IsSet("-tlscert") {
if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) {
tlsOptions.CertFile = ""
}
}
if !flag.IsSet("-tlskey") {
if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) {
tlsOptions.KeyFile = ""
}
}
var err error
tlsConfig, err = tlsconfig.Client(tlsOptions)
if err != nil {
fmt.Fprintln(stderr, err)
os.Exit(1)
}
}
cli := client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], tlsConfig)
if err := cli.Cmd(flag.Args()...); err != nil {
if sterr, ok := err.(client.StatusError); ok {
if sterr.Status != "" {
fmt.Fprintln(cli.Err(), sterr.Status)
os.Exit(1)
}
os.Exit(sterr.StatusCode)
}
fmt.Fprintln(cli.Err(), err)
os.Exit(1)
}
}
func showVersion() {
if utils.ExperimentalBuild() {
fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
} else {
fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
}
}
| [
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
SLU/slu_models.py | # coding: utf8
import os
import sys
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
#sys.path.append( os.environ['RNNTAGGERPATH'] )
from fairseq.globals import *
#import utils_classes as Cl
# ---------- Decoders from LD-RNN tool ----------
# This part is from my first system coded from scratch with pytorch, so be forgiving if you spot some oddities while reading it :-)
class SimpleDecoder(nn.Module):
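# Label decoder over encoder states: train_forward teacher-forces the gold label stream,
# while test_forward re-feeds its own greedy predictions (torch.max over scores) when self.TEST == 1.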
def __init__(self, nn_params, input_size, direction):
super(SimpleDecoder, self).__init__()
# TMP FOR DEBUG
self.debug_flag = False
self.attention_heads = nn_params.attention_heads
self.start_tag_idx = nn_params.start_tag_idx
self.end_tag_idx = nn_params.end_tag_idx
self.batch_size = nn_params.batch_size
self.vocab_size = nn_params.word_vocab_size
self.char_vocab_size = nn_params.char_vocab_size
self.tagset_size = nn_params.tag_vocab_size
self.hidden_dim = 2*nn_params.hidden_dim
self.label_embed_dim = nn_params.label_embed_dim # NEW
self.char_embed_dim = nn_params.char_embed_dim
self.char_hidden_dim = nn_params.char_hidden_dim
self.label_context_size = nn_params.label_context_size
self.lex_hidden_layers = nn_params.lex_hidden_layers
self.lab_hidden_layers = nn_params.lab_hidden_layers
# TMP FOR DEBUG
#self.word_dict = nn_params.word_dict
#self.label_dict = nn_params.label_dict
#self.ix_to_sublabel = nn_params.ix_to_sublabel
print(' - SimpleDecoder init:')
print(' - start_tag_idx: {}'.format(self.start_tag_idx))
print(' - end_tag_idx: {}'.format(self.end_tag_idx))
print(' - batch_size: {}'.format(self.batch_size))
print(' - vocab_size: {}'.format(self.vocab_size))
print(' - char_vocab_size: {}'.format(self.char_vocab_size))
print(' - tagset_size: {}'.format(self.tagset_size))
print(' - hidden_dim: {}'.format(self.hidden_dim))
print(' - label_embed_dim: {}'.format(self.label_embed_dim))
print(' - char_embed_dim: {}'.format(self.char_embed_dim))
print(' - char_hidden_dim: {}'.format(self.char_hidden_dim))
print(' - label_context_size: {}'.format(self.label_context_size))
print(' - lex_hidden_layers: {}'.format(self.lex_hidden_layers))
print(' - lab_hidden_layers: {}'.format(self.lab_hidden_layers))
print(' ----------')
self.n_subparts = nn_params.n_subparts
self.sl_batch_size = 1
if self.n_subparts > 0:
self.tag_to_subparts = nn_params.tag_to_subparts
self.num_directions = 1
self.CUDA = nn_params.CUDA
self.TEST = 0
self.TeachingSignal = True
self.dtype = nn_params.dtype
self.ltype = nn_params.ltype
self.direction = direction
self.output_length_factor = nn_params.output_length_factor
if self.direction == 0 or self.direction == 1:
self.output_length_factor = 1.0
print(' *** SimpleDecoder, output-length-factor: {}'.format(self.output_length_factor))
sys.stdout.flush()
self.bw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)
self.emb_dropout_p = nn_params.embed_dropout # NEW
self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)
self.attention_size = input_size # TMP
attention_size = input_size
#self.hidden_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim + nn_params.attention_heads * attention_size
#if self.n_subparts > 0:
# self.hidden_dim = self.hidden_dim + nn_params.sublabel_hidden_dim
self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim
#if self.n_subparts > 0:
# self.input_dim = self.input_dim + nn_params.sublabel_hidden_dim
self.BWInputNorm = nn.LayerNorm(self.input_dim)
self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)
if self.attention_heads > 0:
print(' *** SimpleDecoder: using gated attention context')
sys.stdout.flush()
self.h_lin = nn.Linear(input_size, input_size)
self.a_lin = nn.Linear(attention_size, attention_size)
self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)
#self.LexAttention = MultiHeadAttention(attention_size, self.hidden_dim, attention_size, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW
#self.SemAttention = AttentionModule(self.hidden_dim, self.hidden_dim, self.hidden_dim, nn_params.attention_heads, nn_params.attention_type, self.dtype) # NEW
self.SLM = SubLabelModule(nn_params, input_size)
self.RNNInputNorm = nn.LayerNorm(self.hidden_dim)
#self.bw_RNN = nn.GRU(self.hidden_dim, self.hidden_dim, bidirectional=False)
self.bw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, nn_params.contextual_encoder_type)
#self.bw_RNN.flatten_parameters()
self.MLPInputNorm = nn.LayerNorm(self.hidden_dim)
self.BWOutputNorm = nn.LayerNorm(self.tagset_size)
self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )
output_dim = self.hidden_dim
if self.n_subparts > 0:
output_dim = output_dim + nn_params.sublabel_hidden_dim
self.bw_hidden2tag = nn.Linear(output_dim, self.tagset_size)
self.hid_dropout_p = nn_params.hidden_dropout # NEW
self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)
#self.dir_hidden = self.bw_RNN.get_hidden_state()
def init_hidden(self):
#self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE
self.bw_RNN.init_hidden()
self.SLM.init_hidden()
def resize_embeddings(self, nn_params):
if nn_params.tag_vocab_size > self.tagset_size:
old_embeddings = self.bw_label_embeddings
self.bw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)
self.bw_label_embeddings.weight[:self.tagset_size,:] = old_embeddings.weight
old_lin = self.bw_hidden2tag
output_dim = self.hidden_dim
if self.n_subparts > 0:
output_dim = output_dim + nn_params.sublabel_hidden_dim
self.bw_hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)
self.bw_hidden2tag.weight[:self.tagset_size,:] = old_lin.weight
old_norm = self.BWOutputNorm
self.BWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)
self.BWOutputNorm.weight[:self.tagset_size] = old_norm.weight
self.tagset_size = nn_params.tag_vocab_size
def train_forward(self, input, bw_streams):
dims = input[0].size()
sequence_length = dims[0]
batch_size = self.batch_size
bw_label_streams = bw_streams[0]
next_sublabels = bw_streams[1]
indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)
source_length = sequence_length
sequence_length = len(indeces)
gold_sequence_length = bw_label_streams[0].size(0)
gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)
source_idxs = [int( i / self.output_length_factor ) for i in indeces]
target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]
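# Each output position i is mapped back to an encoder frame via source_idxs (scaled by
# 1/output_length_factor) and to a gold label position via target_idxs (scaled by
# gold_to_hyp_length_factor), so hypothesis and gold sequence lengths may differ.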
# NEW: TEST IT!!!
bin_size = 1
if self.output_length_factor < 1.0:
bin_size = int(1 / self.output_length_factor) + 1
input_tsr = torch.cat( input, 2 )[source_idxs,:,:]
local_input = [input_tsr]
local_input.append( self.embed_dropout( self.bw_label_embeddings(bw_label_streams[0][target_idxs,:]) ) )
# TMP FOR DEBUG
if self.debug_flag:
print('')
print(' ************************************************')
print(' * SimpleDecoder.train_forward -')
print('')
print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))
print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))
print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))
print('*')
print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))
print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))
print(' * Size of bw_label_streams: {}'.format(bw_label_streams[0].size()))
print(' *')
print(' * SimpleDecoder.train_forward, backward sublabels and labels:')
for tgt_idx in target_idxs:
# print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in next_sublabels[tgt_idx,:,0]]))
print(' -----')
print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(bw_label_streams[0][tgt_idx,0])))
print('')
print(' * SimpleDecoder.train_forward, len of local_input: {}'.format(len(local_input)))
for debug_idx in range(len(local_input)):
print(' * {}'.format(local_input[debug_idx].size()))
print(' ---')
#print(' * SimpleDecoder.train_forward, size of next_sublabels: {}'.format(next_sublabels.size()))
print(' * SimpleDecoder.train_forward, size of bw_label_streams[0]: {}'.format(bw_label_streams[0].size()))
print('')
# END TMP FOR DEBUG
bw_sublabels_rep = []
if self.n_subparts > 0:
bw_sublabels_rep = self.SLM( input_tsr, next_sublabels[target_idxs,:,:], 1 )
# TMP FOR DEBUG
if self.debug_flag:
#print(' * SimpleDecoder.train_forward, size of bw_sublabels_rep: {}'.format(bw_sublabels_rep[0].size()))
print(' ***********************************************************')
sys.stdout.flush()
#local_input = local_input + bw_sublabels_rep
bw_total_input = self.BWInputNorm( torch.cat( local_input, 2 ) )
#self.bw_RNN.flatten_parameters()
#idxs = range(bw_total_input.size(0),-1,-1)
rnn_input = self.RNNInputNorm( self.HiddenSizeMap( bw_total_input ) )
bw_hidden_state, self.dir_hidden = self.bw_RNN( rnn_input )
bw_mlp_input = rnn_input + self.hidden_dropout( bw_hidden_state )
deep_reps = self.output_mlp( self.MLPInputNorm( bw_mlp_input ) )
#bw_final_input = [bw_mlp_input + self.hidden_dropout( deep_reps )] + bw_sublabels_rep
bw_final_input = torch.cat( [bw_mlp_input + self.hidden_dropout(deep_reps)] + bw_sublabels_rep, -1 )
bw_scores = F.log_softmax( self.BWOutputNorm( self.bw_hidden2tag( bw_final_input ) ), dim=2 )
return (bw_hidden_state, bw_scores)
# NOTE: we assume "input" is a list of all inputs given to this layer, "bw_label_stream" is the stream of backward labels, so that accessing the i-th position of bw_label_stream when predicting label at position i, gives the label on the right of the current position.
def fast_forward(self, input, bw_streams):
vflag = (self.TEST == 1)
if self.TeachingSignal and (not vflag):
return self.train_forward(input, bw_streams)
else:
return self.test_forward(input, bw_streams)
def test_forward(self, input, bw_streams):
# NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.
#lex_rnn_out = input[0]
vflag = (self.TEST == 1)
dims = input[0].size()
sequence_length = dims[0]
batch_size = self.batch_size
bw_label_streams = bw_streams[0]
#print(' - SimpleDecoder.forward, input size: {}'.format(input[0].size()))
#sys.stdout.flush()
indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)
source_length = sequence_length
sequence_length = len(indeces)
gold_sequence_length = bw_label_streams[0].size(0)
gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)
embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)
hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)
if vflag:
embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)
hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)
hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE
scores = torch.zeros(sequence_length, batch_size, self.tagset_size).type(self.dtype) #VARIABLE
start_idx = 0
if self.direction == 1 or self.direction == 3:
start_idx = -1
next_labels = bw_label_streams[0][start_idx,:]
prev_input = torch.cat( input, 2 )
next_sublabels = bw_streams[1] #VARIABLE
# NEW: TEST IT!!!
bin_size = 1
if self.output_length_factor < 1.0:
bin_size = int(1 / self.output_length_factor) + 1
for i in indeces:
source_idx = int( i / self.output_length_factor )
bin_bound = min(source_length,source_idx+bin_size) # NEW: TEST IT!!!
target_idx = int( i * gold_to_hyp_length_factor )
if self.TeachingSignal and (not vflag):
next_labels = bw_label_streams[0][target_idx,:]
if self.n_subparts > 0:
next_sublabels = bw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT
curr_lex_input = torch.sum( prev_input[source_idx:bin_bound,:,:], 0 ) # SOURCE INDEXING ## This is ~different in 'train_forward'
#curr_lex_input = prev_input[source_idx,:,:] # TMP, SOURCE INDEXING ...
bw_sublabels_rep = self.SLM( curr_lex_input, next_sublabels, 0 )
bw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!
bw_total_input_lst.append( self.embed_dropout( self.bw_label_embeddings( next_labels ).view(1, batch_size, -1) ) )
if self.attention_heads > 0:
#print(' xxx SimpleDecoder, applying attention: {}, {}'.format(hidden_state[i,:,:].size(), prev_input.size()))
#sys.stdout.flush()
c, alphas = self.LexAttention( hidden_state[i,:,:].clone().detach().view(batch_size, 1, -1), prev_input.transpose(0,1).contiguous().detach() )
#bw_total_input_lst.append( c )
# We gate-mix the original input and the attention vector
g_lambda = F.sigmoid( self.h_lin( bw_total_input_lst[0] ) + self.a_lin(c) )
bw_total_input_lst[0] = g_lambda * bw_total_input_lst[0] + (1.0 - g_lambda) * c
bw_total_input = self.BWInputNorm( torch.cat( bw_total_input_lst, 2 ) )
rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( bw_total_input ) ) ) # NEW: hidden_dropout !
_, dec_hidden_state = self.bw_RNN( rnn_input )
#hidden_state[i,:,:] = dec_hidden_state[0,:,:]
bw_mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )
deep_reps = self.output_mlp( bw_mlp_input )
final_dec_state = bw_mlp_input + self.hidden_dropout( deep_reps )
hidden_state[i,:,:] = final_dec_state
bw_final_input = torch.cat( [final_dec_state] + bw_sublabels_rep, -1 )
scores[i,:,:] = F.log_softmax( self.BWOutputNorm( self.bw_hidden2tag( bw_final_input ) ), dim=1 )
(max_scores, max_indeces) = torch.max(scores[i,:,:], 1)
max_indeces = max_indeces.squeeze()
if vflag:
next_labels = max_indeces
next_labels = next_labels.view(self.batch_size)
max_indeces = max_indeces.unsqueeze(0)
if self.n_subparts > 0:
next_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT
return (hidden_state, scores)
def forward(self, input, bw_streams):
return self.test_forward(input, bw_streams)
def set_batch_size(self, val):
self.batch_size = val
if self.n_subparts > 0:
self.sl_batch_size = val
self.bw_RNN.set_batch_size( val )
self.SLM.set_batch_size(val)
def set_test_mode(self, val):
self.TEST = val
self.bw_RNN.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.TeachingSignal = val
class BidirectionalDecoder(nn.Module):
def __init__(self, nn_params, input_size, direction):
super(BidirectionalDecoder, self).__init__()
# TMP FOR DEBUG
self.debug_flag = False
self.attention_heads = nn_params.attention_heads
self.start_tag_idx = nn_params.start_tag_idx
self.end_tag_idx = nn_params.end_tag_idx
self.batch_size = nn_params.batch_size
self.vocab_size = nn_params.word_vocab_size
self.char_vocab_size = nn_params.char_vocab_size
self.tagset_size = nn_params.tag_vocab_size
self.hidden_dim = 2*nn_params.hidden_dim
self.label_embed_dim = nn_params.label_embed_dim # NEW
self.char_embed_dim = nn_params.char_embed_dim
self.char_hidden_dim = nn_params.char_hidden_dim
self.label_context_size = nn_params.label_context_size
self.lex_hidden_layers = nn_params.lex_hidden_layers
self.lab_hidden_layers = nn_params.lab_hidden_layers
self.n_subparts = nn_params.n_subparts
self.sl_batch_size = 1
if self.n_subparts > 0:
self.tag_to_subparts = nn_params.tag_to_subparts
self.num_directions = 1
self.CUDA = nn_params.CUDA
self.TEST = 0
self.TeachingSignal = True
self.dtype = nn_params.dtype
self.ltype = nn_params.ltype
self.direction = direction
self.output_length_factor = nn_params.output_length_factor
if self.direction == 0 or self.direction == 1:
self.output_length_factor = 1.0
# TMP FOR DEBUG
#self.word_dict = nn_params.word_dict
#self.label_dict = nn_params.label_dict
#self.ix_to_sublabel = nn_params.ix_to_sublabel
self.fw_label_embeddings = nn.Embedding(self.tagset_size, nn_params.label_embed_dim, sparse=False)
self.emb_dropout_p = nn_params.embed_dropout # NEW
self.embed_dropout = nn.Dropout(p=nn_params.embed_dropout)
attention_size = input_size
sem_attention_size = self.hidden_dim
self.input_dim = input_size + nn_params.label_context_size * nn_params.label_embed_dim
self.FWInputNorm = nn.LayerNorm( self.input_dim )
self.HiddenSizeMap = nn.Linear(self.input_dim, self.hidden_dim)
if self.attention_heads > 0:
self.h_lin = nn.Linear(attention_size, attention_size)
self.a_lin = nn.Linear(attention_size, attention_size)
self.LexAttention = ga.GlobalAttention([self.hidden_dim, attention_size, attention_size], attention_size)
self.SemAttention = ga.GlobalAttention([self.hidden_dim, self.hidden_dim, self.hidden_dim], sem_attention_size)
self.SLM = SubLabelModule(nn_params, input_size)
self.RNNInputNorm = nn.LayerNorm( self.hidden_dim )
self.fw_RNN = ContextualFeatureEncoder(self.hidden_dim, self.hidden_dim, self.batch_size, 1, False, nn_params.dtype, nn_params.contextual_encoder_type)
self.hid_dropout_p = nn_params.hidden_dropout # NEW
self.hidden_dropout = nn.Dropout(p=nn_params.hidden_dropout)
self.MLPInputNorm = nn.LayerNorm( self.hidden_dim )
self.FWOutputNorm = nn.LayerNorm( self.tagset_size )
self.output_mlp = ReLU_MLP( [2,self.hidden_dim, self.hidden_dim] )
output_dim = self.hidden_dim
if self.n_subparts > 0:
output_dim = output_dim + nn_params.sublabel_hidden_dim
output_dim = output_dim + nn_params.attention_heads * sem_attention_size
self.hidden2tag = nn.Linear(output_dim, self.tagset_size)
#self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE
def init_hidden(self):
#self.dir_hidden = torch.zeros(1, self.batch_size, self.hidden_dim).type(self.dtype) #VARIABLE
self.fw_RNN.init_hidden()
self.SLM.init_hidden()
def resize_embeddings(self, nn_params):
if nn_params.tag_vocab_size > self.tagset_size:
old_embeddings = self.fw_label_embeddings
self.fw_label_embeddings = nn.Embedding(nn_params.tag_vocab_size, nn_params.label_embed_dim, sparse=False)
self.fw_label_embeddings.weight[:self.tagset_size,:] = old_embeddings.weight
old_lin = self.hidden2tag
output_dim = self.hidden_dim
if self.n_subparts > 0:
output_dim = output_dim + nn_params.sublabel_hidden_dim
self.hidden2tag = nn.Linear(output_dim, nn_params.tag_vocab_size)
self.hidden2tag.weight[:self.tagset_size,:] = old_lin.weight
old_norm = self.FWOutputNorm
self.FWOutputNorm = nn.LayerNorm(nn_params.tag_vocab_size)
self.FWOutputNorm.weight[:self.tagset_size] = old_norm.weight
self.tagset_size = nn_params.tag_vocab_size
def train_forward(self, input, fw_streams, bw_states):
dims = input[0].size()
sequence_length = dims[0]
batch_size = self.batch_size
fw_label_streams = fw_streams[0]
prev_sublabels = fw_streams[1]
indeces = decoding_indeces_(self.direction, sequence_length, self.output_length_factor)
source_length = sequence_length
sequence_length = len(indeces)
gold_sequence_length = fw_label_streams[0].size(0)
gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)
source_idxs = [int( i / self.output_length_factor ) for i in indeces]
target_idxs = [int( i * gold_to_hyp_length_factor ) for i in indeces]
input_tsr = torch.cat( input, 2 )[source_idxs,:,:]
local_input = [input_tsr]
local_input.append( self.embed_dropout( self.fw_label_embeddings(fw_label_streams[0][target_idxs,:]) ) )
# TMP FOR DEBUG
if self.debug_flag:
print('')
print(' ************************************************')
print(' * BidirectionalDecoder.train_forward -')
print('')
print(' *** indeces ({}): {}'.format(len(indeces), list(indeces)))
print(' *** source_idxs ({}): {}'.format(len(source_idxs), source_idxs))
print(' *** target_idxs ({}): {}'.format(len(target_idxs), target_idxs))
print('*')
print(' * Size of input: {}'.format( torch.cat(input, 2).size() ))
print(' * Size of local_input: {}'.format( torch.cat(local_input, 2).size() ))
print(' * Size of bw_label_streams: {}'.format(fw_label_streams[0].size()))
print(' *')
print(' * BidirectionalDecoder.train_forward, forward sublabels and labels:')
for tgt_idx in target_idxs:
# print(' {}'.format([self.ix_to_sublabel[sl.item()] for sl in prev_sublabels[tgt_idx,:,0]]))
print(' -----')
print('@{}, {}'.format(tgt_idx, self.label_dict.index2token(fw_label_streams[0][tgt_idx,0])))
print('')
print(' * BidirectionalDecoder.train_forward, len of local_input: {}'.format(len(local_input)))
for debug_idx in range(len(local_input)):
print(' * {}'.format(local_input[debug_idx].size()))
print(' ---')
#print(' * BidirectionalDecoder.train_forward, size of prev_sublabels: {}'.format(prev_sublabels.size()))
print(' * BidirectionalDecoder.train_forward, size of fw_label_streams[0]: {}'.format(fw_label_streams[0].size()))
#print(' ***********************************************************')
#print('')
# END TMP FOR DEBUG
fw_sublabels_rep = []
if self.n_subparts > 0:
fw_sublabels_rep = self.SLM( input_tsr, prev_sublabels[target_idxs,:,:], 1 )
# TMP FOR DEBUG
if self.debug_flag:
#print(' * BidirectionalDecoder.train_forward, size of fw_sublabels_rep: {}'.format(fw_sublabels_rep[0].size()))
print(' ***********************************************************')
sys.stdout.flush()
#local_input = local_input + fw_sublabels_rep
fw_total_input = self.FWInputNorm( torch.cat( local_input, 2 ) )
rnn_input = self.RNNInputNorm( self.HiddenSizeMap( fw_total_input ) )
fw_hidden_state, self.dir_hidden = self.fw_RNN( rnn_input )
fw_mlp_input = rnn_input + self.hidden_dropout( fw_hidden_state )
deep_reps = self.output_mlp( self.MLPInputNorm( fw_mlp_input ) )
fw_final_input = torch.cat( [fw_mlp_input + self.hidden_dropout( deep_reps + bw_states[0][indeces] )] + fw_sublabels_rep, -1 )
fw_scores = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=2 )
return (fw_hidden_state, fw_scores)
# NOTE: we assume "bw_states" contains backward hidden states and backward predictions, this and only this information, and in this order.
# OBSOLETE: remove it !
def fast_forward(self, input, fw_streams, bw_states):
vflag = (self.TEST == 1)
if self.TeachingSignal and (not vflag):
#print(' * BidirectionalDecoder.train_forward...')
#sys.stdout.flush()
return self.train_forward(input, fw_streams, bw_states)
else:
#print(' * BidirectionalDecoder.test_forward...')
#sys.stdout.flush()
return self.test_forward(input, fw_streams, bw_states)
# NOTE: we assume "bw_states" contains backward hidden states and backward predictions, this and only this information, and in this order.
def test_forward(self, input, fw_streams, bw_states):
# NOTE: we assume the first element of input is the lexical-level representation computed by the encoder, that is its hidden state.
vflag = (self.TEST == 1)
dims = input[0].size()
sequence_length = dims[0]
batch_size = self.batch_size
fw_label_streams = fw_streams[0]
target_length = bw_states[0].size(0)
indeces = decoding_indeces_(self.direction, target_length, 1.0) # We use the length of the output sequence predicted by a previous simple-decoder
source_length = sequence_length
sequence_length = len(indeces)
gold_sequence_length = fw_label_streams[0].size(0)
gold_to_hyp_length_factor = float(gold_sequence_length) / float(sequence_length)
embedding_mask = dropout_mask_dims( [1, batch_size, self.label_embed_dim], self.emb_dropout_p, self.dtype)
hidden_layer_mask = dropout_mask_dims( [batch_size, self.hidden_dim], self.hid_dropout_p, self.dtype)
if vflag:
embedding_mask = torch.ones( [1, batch_size, self.label_embed_dim] ).type(self.dtype)
hidden_layer_mask = torch.ones( [batch_size, self.hidden_dim] ).type(self.dtype)
fw_hidden_state = torch.zeros(sequence_length, batch_size, self.hidden_dim).type(self.dtype) #VARIABLE
fw_scores = torch.zeros(sequence_length, batch_size, self.tagset_size).type(self.dtype) #VARIABLE
start_idx = 0
if self.direction == 1 or self.direction == 3:
start_idx = -1
prev_labels = fw_label_streams[0][start_idx,:]
prev_input = torch.cat( input, 2 )
prev_sublabels = fw_streams[1] #VARIABLE
# NEW: TEST IT!!!
bin_size = 1
if self.output_length_factor < 1.0:
bin_size = int(1 / self.output_length_factor) + 1
self.fw_RNN.set_hidden_state( bw_states[0][0,:,:].view(1, batch_size, -1))
for i in indeces:
source_idx = int( i / self.output_length_factor )
bin_bound = min(source_length, source_idx+bin_size) # NEW: TEST IT!!!
target_idx = int( i * gold_to_hyp_length_factor )
if self.TeachingSignal and (not vflag):
prev_labels = fw_label_streams[0][target_idx,:]
if self.n_subparts > 0:
prev_sublabels = fw_streams[1][target_idx,:,:] #VARIABLE #GRAPHCHECKPOINT
curr_lex_input = torch.sum(prev_input[source_idx:bin_bound,:,:],0) ## This is ~different from 'train_forward'; bin_bound (computed above) caps the summed window
#curr_lex_input = prev_input[source_idx,:,:]
fw_sublabels_rep = self.SLM( curr_lex_input, prev_sublabels, 0 )
fw_total_input_lst = [curr_lex_input.view(1, batch_size, -1)] # SOURCE INDEXING # NEW: TEST IT!!!
fw_total_input_lst.append( self.embed_dropout( self.fw_label_embeddings( prev_labels ).view(1, batch_size, -1) ) )
if self.attention_heads > 0:
c, alphas = self.LexAttention( fw_hidden_state[i,:,:].clone().view(batch_size, 1, -1), prev_input.transpose(0, 1).contiguous() )
#fw_total_input_lst.append( c )
g_lambda = F.sigmoid( self.h_lin( fw_total_input_lst[0] ) + self.a_lin(c) )
fw_total_input_lst[0] = g_lambda * fw_total_input_lst[0] + (1.0 - g_lambda) * c
fw_total_input = self.FWInputNorm( torch.cat( fw_total_input_lst, 2 ) )
rnn_input = self.RNNInputNorm( self.hidden_dropout( self.HiddenSizeMap( fw_total_input ) ) )
_, dec_hidden_state = self.fw_RNN( rnn_input )
#fw_hidden_state[i,:,:] = dec_hidden_state[0,:,:]
#mlp_input = fw_total_input[0] + hidden_layer_mask*( dec_hidden_state[0,:,:] )
mlp_input = self.MLPInputNorm( rnn_input[0] + self.hidden_dropout( dec_hidden_state[0,:,:] ) )
deep_reps = self.output_mlp( mlp_input )
dec_final_state = mlp_input + self.hidden_dropout(deep_reps)
fw_hidden_state[i,:,:] = dec_final_state
atts = []
if self.attention_heads > 0:
sem_c, sem_alphas = self.SemAttention(dec_final_state.clone().view(batch_size, 1, -1), bw_states[0].transpose(0, 1).contiguous())
atts = [sem_c.view(batch_size, -1)]
#fw_final_input = torch.cat( [mlp_input + self.hidden_dropout(deep_reps) + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )
fw_final_input = torch.cat( [dec_final_state + bw_states[0][i,:,:]] + fw_sublabels_rep + atts, -1 )
#fw_scores[i,:,:] = F.log_softmax( self.hidden2tag( fw_final_input + torch.sum( hidden_layer_mask*( torch.stack(fw_sem_atts) ) )), dim=1 )
fw_scores[i,:,:] = F.log_softmax( self.FWOutputNorm( self.hidden2tag( fw_final_input ) ), dim=1 )
(max_scores, max_indeces) = torch.max(fw_scores[i,:,:], 1)
max_indeces = max_indeces.squeeze()
if vflag:
prev_labels = max_indeces
prev_labels = prev_labels.view(self.batch_size)
max_indeces = max_indeces.unsqueeze(0)
if self.n_subparts > 0:
prev_sublabels = torch.LongTensor(self.tag_to_subparts[max_indeces].transpose(0,1)).type(self.ltype) #VARIABLE #GRAPHCHECKPOINT
return (fw_hidden_state, fw_scores)
def forward(self, input, fw_streams, bw_states):
# TMP FOR DEBUG
#self.train_forward(input, fw_streams, bw_states)
return self.test_forward(input, fw_streams, bw_states)
def set_batch_size(self, val):
self.batch_size = val
if self.n_subparts > 0:
self.sl_batch_size = val
self.fw_RNN.set_batch_size( val )
self.SLM.set_batch_size(val)
def set_test_mode(self, val):
self.TEST = val
self.fw_RNN.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.TeachingSignal = val
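# --- Illustrative sketch (not part of the original model code) ---
# The NOTE comments above assume "bw_states" is the [hidden states, scores] pair produced
# by a backward SimpleDecoder over the same encoder states; BasicSpeechBiseqEncoder.forward
# further down in this file follows exactly this convention. All names here are placeholders.
def _bidirectional_decoder_call_sketch(encoder_states, fw_decoder, bw_decoder, fw_streams, bw_streams):
    bw_hidden_state, bw_scores = bw_decoder([encoder_states], bw_streams)
    fw_hidden_state, fw_scores = fw_decoder([encoder_states], fw_streams, [bw_hidden_state, bw_scores])
    return fw_scores, bw_scores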
# ---------- Models for Speech decoding ----------
class Conv1dNormWrapper(nn.Module):
'''
class Conv1dNormWrapper
Wrap an nn.Conv1d layer so it can be used in an nn.Sequential module, adding a layer-normalization step after the convolution.
'''
def __init__(self, input_size, output_size, kernel, stride_factor):
super(Conv1dNormWrapper,self).__init__()
self.conv = nn.Conv1d(input_size, output_size, kernel, stride=stride_factor)
self.cNorm = nn.LayerNorm( output_size )
def forward(self, input):
return self.cNorm( self.conv( input ).permute(2,0,1) ).permute(1,2,0)
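# --- Minimal usage sketch (illustrative only; sizes are arbitrary example values) ---
# Conv1dNormWrapper keeps the (N, C, L) layout of nn.Conv1d while applying LayerNorm
# over the channel dimension, so it can be stacked directly inside nn.Sequential.
def _conv1d_norm_wrapper_sketch():
    block = Conv1dNormWrapper(input_size=40, output_size=64, kernel=3, stride_factor=1)
    x = torch.randn(8, 40, 100)   # (batch, channels, length), as nn.Conv1d expects
    return block(x)               # -> (8, 64, 98)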
class LSTMWrapper(nn.Module):
'''
LSTMWrapper
Wrap an LSTM layer so it can be used in an nn.Sequential module; only the output sequence is returned, the (h, c) states are discarded.
'''
def __init__(self, input_size, output_size, bidirFlag):
super(LSTMWrapper,self).__init__()
self.lstm = nn.LSTM(input_size, output_size, bidirectional=bidirFlag)
def forward(self, input):
output, _ = self.lstm( input )
return output
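# --- Minimal usage sketch (illustrative only; sizes are arbitrary example values) ---
# Because LSTMWrapper drops the (h, c) tuple, it can be chained with LayerNorm/Dropout
# inside nn.Sequential, which is how BasicEncoder below builds its recurrent stack.
def _lstm_wrapper_sketch():
    rnn = LSTMWrapper(input_size=64, output_size=32, bidirFlag=True)
    x = torch.randn(50, 8, 64)    # (sequence_length, batch, features)
    return rnn(x)                 # -> (50, 8, 64): bidirectional doubles the feature size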
class BasicEncoder(nn.Module):
def __init__(self, params):
super(BasicEncoder,self).__init__()
#self.window_size = params.window_size
# Parameter initialization
# 1. Size of convolution layer
self.input_size = params.num_features
self.input_conv = self.input_size
self.speech_conv_size = params.speech_conv_size
# 2. Size of LSTM layer
self.input_size_lstm = self.speech_conv_size
self.hidden_size = params.speech_lstm_size
# 3. Size of the output, that is of the linear layer
self.output_size = params.output_size
self.num_conv = params.speech_conv
self.num_lstm_layers = params.num_lstm_layers
self.conv_kernel = params.conv_kernel
self.conv_kernel_width = params.conv_kernel_width
self.conv_kernel_height = params.conv_kernel_height
self.conv2d_dim = params.small_dim
self.kernel_2d_hw_ratio = params.kernel_2d_hw_ratio
self.stride_factor1 = params.conv_stride1
self.stride_factor2 = params.conv_stride2
# Layer initialization
# 1. Convolutions
conv_layers = []
for i in range(self.num_conv):
conv_stride = 1
if i == self.num_conv-1:
conv_stride = 2
input_size = self.speech_conv_size
if i == 0:
input_size = self.input_conv
conv_layers.append( ('Conv'+str(i+1), Conv1dNormWrapper(input_size, self.speech_conv_size, self.conv_kernel, conv_stride)) )
conv_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )
#conv_layers.append( ('ConvNorm'+str(i+1), nn.BatchNorm1d( self.speech_conv_size )) )
self.convolutions = nn.Sequential( OrderedDict(conv_layers) )
'''#self.conv1 = nn.Conv2d(self.input_conv,self.speech_conv_size, (self.conv_kernel_width, self.conv_kernel_height), stride=(self.stride_factor1, self.stride_factor1))
self.conv1 = nn.Conv1d(self.input_conv,self.speech_conv_size, self.conv_kernel, stride=self.stride_factor1)
#self.conv2 = nn.Conv1d(self.speech_conv_size,self.speech_conv_size,self.conv_kernel,stride=self.stride_factor2)'''
#self.CONV_norm = nn.LayerNorm( self.speech_conv_size )
# 2. Recurrent layers
recurrent_layers = []
for i in range(self.num_lstm_layers):
input_size = 2*self.hidden_size
if i == 0:
input_size = self.input_size_lstm
recurrent_layers.append( ('LSTM'+str(i+1), LSTMWrapper(input_size, self.hidden_size, True)) )
recurrent_layers.append( ('ConvNorm'+str(i+1), nn.LayerNorm( 2*self.hidden_size )) )
recurrent_layers.append( ('Dropout'+str(i+1), nn.Dropout(p=params.drop_ratio)) )
self.rnns = nn.Sequential( OrderedDict(recurrent_layers) )
#self.h_dropout = nn.Dropout(p=params.drop_ratio)
#self.LSTM_norm = nn.LayerNorm(self.hidden_size*2)
#self.rnns = nn.LSTM(self.input_size_lstm,self.hidden_size,num_layers = self.num_lstm_layers,bidirectional=True)
#Linear Layer
self.linear_layer = nn.Linear(2*self.hidden_size, self.output_size)
#small_dim = int( math.sqrt(seq_len / hw_ratio) + 0.5 )
#x_pad = torch.randn(num_features, batch_size, small_dim * hw_ratio * small_dim - seq_len)
#x_padded = torch.cat( [x, x_pad], 2 )
#x_conv = x_padded.view(num_features, batch_size, hw_ratio*small_dim, small_dim)
'''
print(' *** Initializing BasicEncoder:')
print(' * Input size: {}'.format(params.num_features))
print(' * Output size: {}'.format(params.output_size))
print(' * Convolution size: {}'.format(params.speech_conv_size))
print(' * Hidden size: {}'.format(params.speech_lstm_size))
print(' -')
print(' * Stride factor 1: {}'.format(params.conv_stride1))
print(' * Stride factor 2: {}'.format(params.conv_stride2))
print(' * Num. LSTM layers: {}'.format(params.num_lstm_layers))
print(' ***')
'''
def forward(self, x):
# Input has shape (sequence_length, batch_size, num. of channels), that is (L, N, C), convolution needs it to be (N, C, L)
# 1. For Conv2d
#(L, N, C) = x.size()
#small_dim = int( math.sqrt(float(L) / float(self.kernel_2d_hw_ratio)) )
#out = self.conv1( x.permute(1, 2, 0).view(N, C, small_dim * self.kernel_2d_hw_ratio, small_dim) )
#out = self.h_dropout( out.view(N, self.speech_conv_size, -1).permute(2,0,1) )
# ---------------------
'''# 2. For Conv1d
out = self.conv1( x.permute(1, 2, 0) )
out = self.h_dropout( out.permute(2,0,1) )
# ---------------------
#out = self.conv2(x)
output, _ = self.rnns( self.conv_output_norm( out ) )
output = self.h_dropout(output)
output = self.linear_layer( self.LSTM_norm(output) )
#output = self.log_softmax(output)'''
# New forward code with generic layer structures
out = self.convolutions( x.permute(1, 2, 0) )
#out = self.rnns( self.CONV_norm( out.permute(2,0,1) ) )
#output = self.linear_layer( self.LSTM_norm( out ) )
out = self.rnns( out.permute(2, 0, 1) )
output = self.linear_layer( out )
return (output, output, out)
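# --- Shape-flow sketch (illustrative only; example sizes are assumptions) ---
# BasicEncoder.forward takes (L, N, C) speech features, runs the Conv1d stack in (N, C, L)
# layout, the LSTM stack in (L', N, C') layout, and returns (scores, scores, lstm_output).
def _basic_encoder_shapes_sketch(encoder, num_features=40):
    x = torch.randn(200, 8, num_features)   # (sequence_length, batch, channels)
    scores, _, lstm_out = encoder(x)
    # scores: roughly (L/2, 8, output_size); lstm_out: roughly (L/2, 8, 2*speech_lstm_size)
    return scores.shape, lstm_out.shape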
class BasicSpeechEncoder(nn.Module):
def __init__(self, params, nn_params):
super(BasicSpeechEncoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
self.encoder = BasicEncoder(params)
self.log_softmax = nn.LogSoftmax(dim = 2)
def get_fw_parameters(self):
return self.parameters()
def get_bw_parameters(self):
return self.get_fw_parameters()
def forward(self, x, next_labels, prev_labels):
(representations, reps, hidden_states) = self.encoder( x )
scores = self.log_softmax( representations )
return (scores, scores, hidden_states) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE
#return (scores, representations)
def set_test_mode(self, val):
return
def set_teaching_signal_flag(self, val):
return
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
#SpkID = torch.ones_like(input)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
#SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002
#return torch.cat( [padder, input + SpkID, padder], 0 )
return torch.cat( [padder, input, padder], 0 )
class BasicSpeechSeqEncoder(nn.Module):
def __init__(self, params, nn_params):
super(BasicSpeechSeqEncoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
self.encoder = BasicEncoder(params)
self.seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 0)
def get_fw_parameters(self):
return self.parameters()
def get_bw_parameters(self):
return self.get_fw_parameters()
def forward(self, x, next_labels, prev_labels):
(sequence_length, batch_size, num_features) = x.size()
self.seq_encoder.set_batch_size( batch_size )
(representations, reps, hidden_states) = self.encoder(x)
(prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))
fw_streams = (prev_labels, prev_sublabels)
self.seq_encoder.init_hidden()
(fw_hidden_state, fw_scores) = self.seq_encoder([hidden_states], fw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE
return (fw_scores, fw_scores, fw_hidden_state)
def set_test_mode(self, val):
self.seq_encoder.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.seq_encoder.set_teaching_signal_flag( val )
def load_encoder(self, bsencoder):
self.encoder.load_state_dict( bsencoder.encoder.state_dict() )
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.cuda.FloatTensor(1, batch_size, num_features)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
class BasicSpeechBiseqEncoder(nn.Module):
def __init__(self, params, nn_params):
super(BasicSpeechBiseqEncoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
self.encoder = BasicEncoder(params)
#self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 0)
#self.seq_encoder = SimpleDecoder(nn_params, params.output_size, 2) # NEW: TEST IT!!!
self.bw_seq_encoder = SimpleDecoder(nn_params, 2*params.speech_lstm_size, 1) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE
#self.log_softmax = nn.LogSoftmax(dim = 2)
self.fw_seq_encoder = BidirectionalDecoder(nn_params, 2*params.speech_lstm_size, 0)
def get_fw_parameters(self):
return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.fw_seq_encoder.parameters()))
def get_bw_parameters(self):
return list(filter(lambda p: p.requires_grad, self.encoder.parameters())) + list(filter(lambda p: p.requires_grad, self.bw_seq_encoder.parameters()))
def forward(self, x, next_labels, prev_labels):
(sequence_length, batch_size, num_features) = x.size()
self.fw_seq_encoder.set_batch_size( batch_size )
self.bw_seq_encoder.set_batch_size( batch_size )
(representations, reps, hidden_states) = self.encoder(x)
(prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0]))
fw_streams = (prev_labels, prev_sublabels)
bw_streams = (next_labels, next_sublabels)
self.bw_seq_encoder.init_hidden()
self.fw_seq_encoder.init_hidden()
#(fw_hidden_state, fw_scores) = self.seq_encoder([representations], fw_streams)
(bw_hidden_state, bw_scores) = self.bw_seq_encoder([hidden_states], bw_streams) # SWITCH TO THIS FOR RICH-REPRESENTATION ARCHITECTURE
(fw_hidden_state, fw_scores) = self.fw_seq_encoder([hidden_states], fw_streams, [bw_hidden_state, bw_scores])
global_scores = 0.5 * (fw_scores + bw_scores)
return (fw_scores, bw_scores, fw_hidden_state)
def set_test_mode(self, val):
self.bw_seq_encoder.set_test_mode( val )
self.fw_seq_encoder.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.bw_seq_encoder.set_teaching_signal_flag( val )
self.fw_seq_encoder.set_teaching_signal_flag( val )
def load_encoder(self, bsencoder):
self.encoder.load_state_dict( bsencoder.encoder.state_dict() )
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
#self.speaker_val = val
#(sequence_length, batch_size, num_features) = input.size()
#padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
#SpkID = torch.ones_like(input)
#for i in range( batch_size ):
# padder[:,i,:] = self.speaker_val[i]
# SpkID[:,i,:] = SpkID[:,i,:] * self.speaker_val[i] * 0.002
#return torch.cat( [padder, input + SpkID, padder], 0 )
class MLSpeechEncoder(nn.Module):
def __init__(self, ch_params, tk_params, nn_params):
super(MLSpeechEncoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)
self.token_encoder = BasicSpeechEncoder(tk_params, nn_params)
def get_fw_parameters(self):
return self.parameters()
def get_bw_parameters(self):
return self.get_fw_parameters()
def forward(self, x, next_labels, prev_labels):
(ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)
(tk_scores, tk_sc, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)
return (tk_scores, tk_scores, tk_reps)
def load_char_encoder(self, char_encoder):
self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )
#for param in self.char_encoder.encoder.parameters():
# param.requires_grad = False
def freeze_char_encoder(self):
for param in self.char_encoder.parameters():
param.requires_grad = False
def unfreeze_char_encoder(self):
for param in self.char_encoder.parameters():
param.requires_grad = True
def load_token_encoder(self, token_encoder):
self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )
def set_test_mode(self, val):
return
def set_teaching_signal_flag(self, val):
return
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
class MLSpeechSeqEncoder(nn.Module):
def __init__(self, ch_params, tk_params, nn_params):
super(MLSpeechSeqEncoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
self.char_encoder = BasicSpeechEncoder(ch_params, nn_params)
self.token_encoder = BasicSpeechSeqEncoder(tk_params, nn_params)
def get_fw_parameters(self):
return self.char_encoder.get_fw_parameters() + self.token_encoder.get_fw_parameters()
def get_bw_parameters(self):
return self.char_encoder.get_bw_parameters() + self.token_encoder.get_bw_parameters()
def forward(self, x, next_labels, prev_labels):
(ch_scores, ch_sc, ch_reps) = self.char_encoder(x, next_labels, prev_labels)
(fw_tk_scores, bw_tk_scores, tk_reps) = self.token_encoder(ch_reps, next_labels, prev_labels)
return (fw_tk_scores, bw_tk_scores, tk_reps)
def load_char_encoder(self, char_encoder):
self.char_encoder.encoder.load_state_dict( char_encoder.encoder.state_dict() )
#for param in self.char_encoder.encoder.parameters():
# param.requires_grad = False
def freeze_char_encoder(self):
for param in self.char_encoder.parameters():
param.requires_grad = False
def unfreeze_char_encoder(self):
for param in self.char_encoder.parameters():
param.requires_grad = True
def load_token_encoder(self, token_encoder):
self.token_encoder.encoder.rnns.load_state_dict( token_encoder.encoder.rnns.state_dict() )
self.token_encoder.bw_seq_encoder.load_state_dict( token_encoder.bw_seq_encoder.state_dict() )
self.token_encoder.fw_seq_encoder.load_state_dict( token_encoder.fw_seq_encoder.state_dict() )
def load_ml_encoder(self, ml_encoder):
self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )
#print(' -- MLSpeechSeqEncoder: freezing char-encoder parameters...')
#for param in self.char_encoder.parameters():
# param.requires_grad = False
self.token_encoder.encoder.load_state_dict( ml_encoder.token_encoder.encoder.state_dict() )
#print(' -- MLSpeechSeqEncoder: freezing token-encoder (encoder only) parameters...')
#sys.stdout.flush()
#for param in self.token_encoder.encoder.parameters():
# param.requires_grad = False
def load_ml_seq_decoder(self, ml_encoder):
self.char_encoder.load_state_dict( ml_encoder.char_encoder.state_dict() )
self.token_encoder.load_state_dict( ml_encoder.token_encoder.state_dict() )
def set_test_mode(self, val):
self.token_encoder.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.token_encoder.set_teaching_signal_flag( val )
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
# ---------- Models for End-to-end SLU ----------
class SLUSimpleDecoder(nn.Module):
def __init__(self, ch_params, tk_params, nn_params):
super(SLUSimpleDecoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
tmp = nn_params.tag_vocab_size
nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size
decoder_output_size = 0
if nn_params.train_char_decoder or nn_params.load_char_decoder:
print(' -- SLUSimpleDecoder: using character speech decoder')
sys.stdout.flush()
self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)
decoder_output_size = nn_params.hidden_dim
elif nn_params.train_token_decoder or nn_params.load_token_decoder:
print(' -- SLUSimpleDecoder: using token speech decoder')
sys.stdout.flush()
self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)
decoder_output_size = nn_params.hidden_dim
elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:
print(' -- SLUSimpleDecoder: using 2-stage token speech decoder')
sys.stdout.flush()
self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)
decoder_output_size = nn_params.hidden_dim
nn_params.tag_vocab_size = tmp
nn_params.label_embed_dim = 2 * nn_params.label_embed_dim
nn_params.hidden_dim = 2 * nn_params.hidden_dim
self.slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 0)
def get_fw_parameters(self):
return self.speech_decoder.get_fw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))
def get_bw_parameters(self):
return self.speech_decoder.get_bw_parameters() + list(filter(lambda p: p.requires_grad, self.slu_decoder.parameters()))
def forward(self, input, bw_label_streams, fw_label_streams):
(prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2
fw_streams = (fw_label_streams, prev_sublabels)
bw_streams = (bw_label_streams, next_sublabels)
#(sequence_length, batch_size, num_features) = input.size()
#padder = torch.cuda.FloatTensor(1, batch_size, num_features)
#for i in range( batch_size ):
# padder[:,i,:] = self.speaker_val[i]
#padded_input = torch.cat( [padder, input, padder], 0 )
(sequence_length, batch_size, num_features) = input.size()  # batch_size is needed below
self.slu_decoder.set_batch_size( batch_size )
(fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)
self.slu_decoder.init_hidden()
(sem_hidden_states, sem_scores) = self.slu_decoder([tk_reps], fw_streams)
return (sem_scores, sem_scores, sem_hidden_states)
def load_speech_encoder(self, speech_encoder):
self.speech_decoder.load_state_dict( speech_encoder.state_dict() )
if isinstance(speech_encoder, MLSpeechSeqEncoder):
print(' -- SLUSimpleDecoder: freezing speech-encoder parameters...')
sys.stdout.flush()
for param in self.speech_decoder.char_encoder.parameters():
param.requires_grad = False
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
def set_test_mode(self, val):
self.speech_decoder.set_test_mode( val )
self.slu_decoder.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.speech_decoder.set_teaching_signal_flag( val )
self.slu_decoder.set_teaching_signal_flag( val )
class SLUBiDecoder(nn.Module):
def __init__(self, ch_params, tk_params, nn_params):
super(SLUBiDecoder,self).__init__()
self.speaker_val = [globals.user_speaker_val]
tmp = nn_params.tag_vocab_size
nn_params.tag_vocab_size = nn_params.sd_tag_vocab_size
decoder_output_size = 0
if nn_params.train_char_decoder or nn_params.load_char_decoder:
print(' -- SLUBiDecoder: using character speech decoder')
sys.stdout.flush()
self.speech_decoder = BasicSpeechSeqEncoder(ch_params, nn_params)
decoder_output_size = nn_params.hidden_dim
elif nn_params.train_token_decoder or nn_params.load_token_decoder:
print(' -- SLUBiDecoder: using token speech decoder')
sys.stdout.flush()
self.speech_decoder = BasicSpeechSeqEncoder(tk_params, nn_params)
decoder_output_size = nn_params.hidden_dim
elif nn_params.train_ml_decoder or nn_params.load_ml_decoder:
print(' -- SLUBiDecoder: using 2-stage token speech decoder')
sys.stdout.flush()
self.speech_decoder = MLSpeechSeqEncoder(ch_params, tk_params, nn_params)
decoder_output_size = nn_params.hidden_dim
nn_params.tag_vocab_size = tmp
nn_params.label_embed_dim = 2 * nn_params.label_embed_dim
nn_params.hidden_dim = 2 * nn_params.hidden_dim
self.bw_slu_decoder = SimpleDecoder(nn_params, decoder_output_size, 1)
self.fw_slu_decoder = BidirectionalDecoder(nn_params, decoder_output_size, 0)
def forward(self, input, bw_label_streams, fw_label_streams):
(prev_sublabels, next_sublabels) = (torch.LongTensor([0]),torch.LongTensor([0])) #VARIABLE x2
fw_streams = (fw_label_streams, prev_sublabels)
bw_streams = (bw_label_streams, next_sublabels)
(sequence_length, batch_size, num_features) = input.size()  # batch_size is needed below
self.bw_slu_decoder.set_batch_size( batch_size )
self.fw_slu_decoder.set_batch_size( batch_size )
(fw_tk_scores, bw_tk_scores, tk_reps) = self.speech_decoder(input, bw_label_streams, fw_label_streams)
self.bw_slu_decoder.init_hidden()
self.fw_slu_decoder.init_hidden()
(sem_bw_hidden_states, sem_bw_scores) = self.bw_slu_decoder([tk_reps], bw_streams)
(sem_fw_hidden_states, sem_fw_scores) = self.fw_slu_decoder([tk_reps], fw_streams, [sem_bw_hidden_states, sem_bw_scores])
global_scores = 0.5 * (sem_fw_scores + sem_bw_scores)
return (global_scores, sem_bw_scores, sem_fw_hidden_states)
def load_speech_encoder(self, speech_encoder):
self.speech_decoder.load_state_dict( speech_encoder.state_dict() )
if isinstance(speech_encoder, MLSpeechSeqEncoder):
print(' -- SLUBiDecoder: freezing speech-encoder parameters...')
sys.stdout.flush()
for param in self.speech_decoder.char_encoder.parameters():
param.requires_grad = False
def set_speaker_val(self, val):
self.speaker_val = val
def pad_input(self, input, val):
self.speaker_val = val
(sequence_length, batch_size, num_features) = input.size()
padder = torch.FloatTensor(1, batch_size, num_features).to(input.device)
for i in range( batch_size ):
padder[:,i,:] = self.speaker_val[i]
return torch.cat( [padder, input, padder], 0 )
def set_test_mode(self, val):
self.speech_decoder.set_test_mode( val )
self.bw_slu_decoder.set_test_mode( val )
self.fw_slu_decoder.set_test_mode( val )
def set_teaching_signal_flag(self, val):
self.speech_decoder.set_teaching_signal_flag( val )
self.bw_slu_decoder.set_teaching_signal_flag( val )
self.fw_slu_decoder.set_teaching_signal_flag( val )
| [] | [] | ["RNNTAGGERPATH"] | [] | ["RNNTAGGERPATH"] | python | 1 | 0 |
main.py | from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from discord.ext import commands
import discord
import os
from dotenv import load_dotenv
import requests
import json
from datetime import datetime, timedelta, date
import pytz
from ClassAssistant import ClassAssistantBot
#INITIALIZE DISCORD BOT
load_dotenv('.env')
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
client = discord.Client()
bot = commands.Bot(command_prefix="!")
CHANNEL_ID = 879663995852849193 #DEV Server
#INITIALIZE THE ACTUAL PROGRAM
IFBot = ClassAssistantBot()
tugas_schedule_list = IFBot.get_jadwal_json()
#INITIALIZE DATE TIME
now = datetime.now()
tz = pytz.timezone('Asia/Jakarta')
tz_jkt = now.replace(tzinfo=tz)
day_list = {'Minggu':'Sunday', 'Senin':'Monday', 'Selasa':'Tuesday', 'Rabu':'Wednesday', 'Kamis':'Thursday', 'Jumat':'Friday', 'Sabtu':'Saturday'}
deadline_format = "%d %B, %Y - %H:%M"
print(tz_jkt)
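# Hedged example of the deadline string format expected by the !task commands below
# (the sample value mirrors the commented-out test string later in this file):
# >>> datetime.strptime("21 June, 2021 - 10:00", deadline_format)
# datetime.datetime(2021, 6, 21, 10, 0)
# >>> datetime.strptime("21 June, 2021 - 10:00", deadline_format).strftime('%A')
# 'Monday'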
def get_words():
response = requests.get("http://zenquotes.io/api/random")
json_data = json.loads(response.text)
quote = json_data[0]['q'] + " -" + json_data[0]['a']
return(quote)
scheduler = AsyncIOScheduler({'apscheduler.timezone': 'UTC'})
@bot.command()
async def today(ctx):
matkul_info_message = IFBot.matkul_data_to_message(IFBot.get_matkul_schedule(), "Today")
c = bot.get_channel(CHANNEL_ID)
await c.send(matkul_info_message)
@bot.command(name='task_new')
async def new_task(ctx, matkul_name, tugas_name, desc, deadline):
tugas_schedule_list = IFBot.get_jadwal_json()
tugas_schedule_list['jadwal_tugas'].append(
{
"matkul_name": str(matkul_name).capitalize(),
"tugas_name": str(tugas_name).capitalize(),
"desc": str(desc).capitalize(),
"date_end": str(deadline),
"day_end": datetime.strptime(deadline, deadline_format).strftime('%A')
}
)
IFBot.save_json(tugas_schedule_list)
await add_tugas_reminder(matkul_name, tugas_name, desc, deadline)
c = bot.get_channel(CHANNEL_ID)
await c.send("Task {} successfully created.".format(tugas_name))
async def add_tugas_reminder(matkul_name, tugas_name, desc, deadline):
deadline_time = datetime.strptime(deadline, deadline_format)
time_morning = deadline_time.replace(hour=10, minute=00)
scheduler.add_job(send_tugas_embed, 'date', run_date=deadline_time, timezone=tz,
args=[matkul_name, tugas_name, desc, deadline])
async def send_tugas_embed(matkul_name, tugas_name, desc, deadline):
embed=discord.Embed(title="Task Reminder", description="The following task will be due in a few days:".format(tugas_name=tugas_name), color=0x0091ff)
embed.set_author(name="ClassAssistantBot", url="https://github.com/noxfl/ClassAssistantBot/", icon_url="https://avatars.githubusercontent.com/u/64892153?v=4")
embed.add_field(name="{matkul_name}".format(matkul_name=matkul_name), value="{tugas_name}: {desc}\n{deadline} ({day_end})".format(
tugas_name = tugas_name,
desc = desc,
deadline = deadline,
day_end = await get_day(deadline, deadline_format)
), inline=False)
c = bot.get_channel(CHANNEL_ID)
await c.send(embed=embed)
async def get_day(date, deadline_format):
return datetime.strptime(date, deadline_format).strftime('%A')
async def remove_all_scheduler():
for job in scheduler.get_jobs():
print(job)
job.remove()
@bot.command(name='task_reload')
async def reload_scheduler(ctx):
print('\n======== REMOVED ALL SCHEDULER ========')
await remove_all_scheduler()
print('\n======== LOADED MATKUL REMINDER ========')
await populate_matkul_reminder()
print('\n======== LOADED TUGAS REMINDER ========')
await populate_tugas_reminder()
c = bot.get_channel(CHANNEL_ID)
await c.send("Tasks successfully reloaded.")
@bot.command(name='absen')
async def absensi2(ctx, i=0):
count = 0
today_jadwal = {}
jadwal = tugas_schedule_list['jadwal_mobile']
for x in jadwal:
if x['day'] == IFBot.today:
count += 1
today_jadwal[count] = x
await send_absen_embed(today_jadwal, i)
async def send_absen_embed(jadwal, i):
c = bot.get_channel(CHANNEL_ID)
try:
embed=discord.Embed(title="{kode_matkul} {name} | {hour_begin} - {hour_end}.".format(
kode_matkul = jadwal[i]['kode_matkul'],
name = jadwal[i]['name'],
hour_begin = jadwal[i]['hour_begin'],
hour_end = jadwal[i]['hour_end'],
), description="desc", color=0x3dff54)
embed.set_author(name="ClassAssistantBot", url="https://github.com/noxfl", icon_url="https://avatars.githubusercontent.com/u/64892153?v=4")
embed.add_field(name="Be sure to login first before clicking any of the link below.", value="[Login](http://leaps.kalbis.ac.id/login)", inline=False)
embed.add_field(name="Attendance", value="[Tap In/Tap Out]({tap_in_link})".format(tap_in_link = jadwal[i]['tap_in_link']), inline=True)
embed.add_field(name="TLM", value="[Here]({learning_resource})".format(learning_resource = jadwal[i]['learning_resource']), inline=True)
embed.add_field(name="Assignments", value="[Here]({assignment})".format(assignment = jadwal[i]['assignment']), inline=True)
embed.set_footer(text="{lecturer}".format(lecturer=jadwal[i]['lecturer']))
await c.send(embed=embed)
print('Embed sent: {name}'.format(name=jadwal[i]['name']))
except:
matkul_info_message = IFBot.matkul_data_to_message(IFBot.get_matkul_schedule('TODAY'), 'TODAY')
await c.send(matkul_info_message)
@bot.command(name='task', aliases=['tugas', 'deadline'])
async def task_deadline(ctx, arg=None, matkul_name="", tugas_name="", desc="", deadline=""):
c = bot.get_channel(CHANNEL_ID)
if str(arg).upper() == 'NEW':
try:
deadline_dt = datetime.strptime(deadline, deadline_format)
deadline_jkt = deadline_dt.replace(tzinfo=tz)
# deadline = "21 June, 2021 - 10:00"
# deadline_dt = datetime.strptime(deadline, deadline_format)
# deadline_jkt = deadline_dt.replace(tzinfo=tz)
print(deadline_dt)
await c.send(deadline_jkt)
except:
await c.send("```Please enter a valid format\n!task <day|new> <matkul_name> <tugas_name> <desc> <deadline>```")
else:
arg_c1 = str(arg).capitalize()
if arg_c1 in day_list:
arg_c2 = str(day_list.get(arg_c1)).capitalize()
print('arg in list', arg_c2)
else:
arg_c2 = str(arg).capitalize()
print('ARG_C: ', arg_c1)
print('INPUT 2: ', arg)
tugas_info_message = IFBot.tugas_data_to_message(IFBot.get_tugas_schedule(arg_c2), arg_c2)
await c.send(tugas_info_message)
@bot.command(name='class', aliases=['kelas', 'cek', 'jadwal'])
async def day(ctx, arg):
#Checks if user asks for HELP, else run program
if str(arg).upper() == 'HELP':
help_info_message = "```Command usage:\n!class <day> | Shows class schedule according to day requested\n!class list | Shows list of classes in current semester\n!class today | Shows today's class list\n!class help | This menu```"
c = bot.get_channel(CHANNEL_ID)
await c.send(help_info_message)
else:
arg_c1 = str(arg).capitalize()
if arg_c1 in day_list:
arg_c2 = str(day_list.get(arg_c1)).capitalize()
print('arg in list', arg_c2)
else:
arg_c2 = str(arg).capitalize()
print('ARG_C: ', arg_c1)
print('INPUT 2: ', arg)
matkul_info_message = IFBot.matkul_data_to_message(IFBot.get_matkul_schedule(arg_c2), arg_c2)
c = bot.get_channel(CHANNEL_ID)
await c.send(matkul_info_message)
async def send_embed(kode_matkul, name, hour_begin, hour_end, title_desc, desc, link, lecturer, learning_resource, assignment):
try:
embed=discord.Embed(title="{kode_matkul} {name} | {hour_begin} - {hour_end}. {title_desc}.".format(
kode_matkul = kode_matkul,
name = name,
hour_begin = hour_begin,
hour_end = hour_end,
title_desc = title_desc
), description="{desc}".format(desc = desc), color=0x3dff54)
embed.set_author(name="ClassAssistantBot", url="https://github.com/noxfl", icon_url="https://avatars.githubusercontent.com/u/64892153?v=4")
embed.add_field(name="Be sure to login first before clicking any of the link below.", value="[Login](http://leaps.kalbis.ac.id/login)", inline=False)
embed.add_field(name="Attendance", value="[Tap In/Tap Out]({tap_in_link})".format(tap_in_link = link), inline=True)
embed.add_field(name="TLM", value="[Here]({learning_resource})".format(learning_resource = learning_resource), inline=True)
embed.add_field(name="Assignments", value="[Here]({assignment})".format(assignment = assignment), inline=True)
embed.set_footer(text="{lecturer}".format(lecturer=lecturer))
c = bot.get_channel(CHANNEL_ID)
await c.send(embed=embed)
print('Embed sent: {name}'.format(name=name))
except:
print('Embed delivery failed')
def modify_thirty_minutes(hour, mode):
if mode.lower() == 'substract':
time_parsed = datetime.strptime(hour, '%H:%M') - timedelta(hours=0, minutes=30)
# print('TIME SUBSTRACTED: {}'.format(time_parsed))
elif mode.lower() == 'add':
time_parsed = datetime.strptime(hour, '%H:%M') + timedelta(hours=0, minutes=30)
# print('TIME ADDED: {}'.format(time_parsed))
else:
print('{function_name}: Invalid mode value has been entered'.format(function_name = modify_thirty_minutes.__name__))
return hour
time_converted = datetime.strftime(time_parsed, "%H:%M")
return time_converted
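# Quick worked example for modify_thirty_minutes (illustrative; values chosen arbitrarily):
# >>> modify_thirty_minutes('09:00', 'substract')
# '08:30'
# >>> modify_thirty_minutes('09:00', 'add')
# '09:30'
# Any other mode prints a warning and returns the input hour string unchanged.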
async def populate_tugas_reminder():
tugas_schedule_list = IFBot.get_jadwal_json()
tugas_set_count = 0
for tugas in tugas_schedule_list['jadwal_tugas']:
tugas_set_count += 1
print('Adding {name} to schedule..'.format(name=tugas['tugas_name']))
await add_tugas_reminder(tugas['matkul_name'], tugas['tugas_name'], tugas['desc'], tugas['date_end'])
# deadline_time = datetime.strptime(tugas['date_end'], deadline_format)
# time_morning = deadline_time.replace(hour=10, minute=00)
# time_h_min_tiga_morning = deadline_time.replace() #pokoknya mundur 3 hari lah dia
# scheduler.add_job(send_message, 'date', run_date=time_morning, timezone=tz, args=['text'])
print('Tugas set count:', tugas_set_count)
async def show_scheduler():
for job in scheduler.get_jobs():
print(job.name, job.trigger, job.func)
async def populate_matkul_reminder():
matkul_schedule_list = IFBot.get_jadwal_json()
matkul_set_count = 0
tap_in_message = "Tap in here"
tap_out_message = "Tap out here"
for num, matkul in enumerate(matkul_schedule_list['jadwal_mobile']):
matkul_set_count += 1
print('Adding {name} to schedule..'.format(name=matkul['name']))
matkul_start_reminder_message = "{} is about to start soon.\nMake sure to check attendance in by clicking the link above!\nGet ready for today's class, best of luck!".format(matkul['name'])
# matkul_start_reminder_message = "{} is about to start soon.\nMake sure to check attendance in by clicking the link above!\nGet ready for today's class, best of luck!"
matkul_end_reminder_message = "{} has now ended. Don't forget to check your attendance out!".format(matkul['name'])
time_before_class = modify_thirty_minutes(matkul['hour_begin'], 'substract').split(":")
time_after_class = modify_thirty_minutes(matkul['hour_end'], 'add').split(":")
day = matkul['day'][0:3]
scheduler.add_job(send_embed, CronTrigger(day_of_week=day.lower(), hour=time_before_class[0], minute=time_before_class[1], timezone=tz),
args=[matkul['kode_matkul'], matkul['name'], matkul['hour_begin'], matkul['hour_end'],
tap_in_message, matkul_start_reminder_message, matkul['tap_in_link'], matkul['lecturer'], matkul['learning_resource'], matkul['assignment']])
scheduler.add_job(send_embed, CronTrigger(day_of_week=day.lower(), hour=time_after_class[0], minute=time_after_class[1], timezone=tz),
args=[matkul['kode_matkul'], matkul['name'], matkul['hour_begin'], matkul['hour_end'],
tap_out_message, matkul_end_reminder_message, matkul['tap_out_link'], matkul['lecturer'], matkul['learning_resource'], matkul['assignment']])
print('Matkul set count:', matkul_set_count)
async def send_message(message):
try:
c = bot.get_channel(CHANNEL_ID)
await c.send(message)
print('Message sent: {}'.format(message))
except:
print('Message delivery failed')
#testing
async def func():
c = client.get_channel(CHANNEL_ID)
await c.send('from func()')
@bot.command()
async def ping(ctx):
await ctx.channel.send("pong")
@bot.command()
async def here(ctx, given_name):
global CHANNEL_ID
channel = discord.utils.get(ctx.guild.channels, name=given_name)
CHANNEL_ID = channel.id
await ctx.channel.send("I will start sending messages here from now on!")
@bot.event
async def on_ready():
print('Successfully logged in as {0.user}'.format(bot))
print("Ready..")
await populate_matkul_reminder()
await populate_tugas_reminder()
# await send_message('Bot has come online')
scheduler.start()
bot.run(DISCORD_TOKEN)
| [] | [] | ["DISCORD_TOKEN"] | [] | ["DISCORD_TOKEN"] | python | 1 | 0 |
hello_earth/hello_earth/wsgi.py | """
WSGI config for hello_earth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_earth.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
mne/tests/test_event.py | # -*- coding: utf-8 -*-
# Author: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD-3-Clause
import os.path as op
import os
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
from mne import (read_events, write_events, make_fixed_length_events,
find_events, pick_events, find_stim_steps, pick_channels,
read_evokeds, Epochs, create_info, compute_raw_covariance,
Annotations)
from mne.io import read_raw_fif, RawArray
from mne.event import (define_target_events, merge_events, AcqParserFIF,
shift_time_events)
from mne.datasets import testing
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname = op.join(base_dir, 'test-eve.fif')
fname_raw = op.join(base_dir, 'test_raw.fif')
fname_gz = op.join(base_dir, 'test-eve.fif.gz')
fname_1 = op.join(base_dir, 'test-1-eve.fif')
fname_txt = op.join(base_dir, 'test-eve.eve')
fname_txt_1 = op.join(base_dir, 'test-eve-1.eve')
fname_c_annot = op.join(base_dir, 'test_raw-annot.fif')
# for testing Elekta averager
elekta_base_dir = op.join(testing.data_path(download=False), 'misc')
fname_raw_elekta = op.join(elekta_base_dir, 'test_elekta_3ch_raw.fif')
fname_ave_elekta = op.join(elekta_base_dir, 'test_elekta-ave.fif')
# using mne_process_raw --raw test_raw.fif --eventsout test-mpr-eve.eve:
fname_txt_mpr = op.join(base_dir, 'test-mpr-eve.eve')
fname_old_txt = op.join(base_dir, 'test-eve-old-style.eve')
raw_fname = op.join(base_dir, 'test_raw.fif')
def test_fix_stim():
"""Test fixing stim STI016 for Neuromag."""
raw = read_raw_fif(raw_fname, preload=True)
# 32768 (016) + 3 (002+001) bits gets incorrectly coded during acquisition
raw._data[raw.ch_names.index('STI 014'), :3] = [0, -32765, 0]
with pytest.warns(RuntimeWarning, match='STI016'):
events = find_events(raw, 'STI 014')
assert_array_equal(events[0], [raw.first_samp + 1, 0, 32765])
events = find_events(raw, 'STI 014', uint_cast=True)
assert_array_equal(events[0], [raw.first_samp + 1, 0, 32771])
def test_add_events():
"""Test adding events to a Raw file."""
# need preload
raw = read_raw_fif(raw_fname)
events = np.array([[raw.first_samp, 0, 1]])
pytest.raises(RuntimeError, raw.add_events, events, 'STI 014')
raw = read_raw_fif(raw_fname, preload=True)
orig_events = find_events(raw, 'STI 014')
# add some events
events = np.array([raw.first_samp, 0, 1])
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad shape
events[0] = raw.first_samp + raw.n_times + 1
events = events[np.newaxis, :]
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp - 1
pytest.raises(ValueError, raw.add_events, events, 'STI 014') # bad time
events[0, 0] = raw.first_samp + 1 # can't actually be first_samp
pytest.raises(ValueError, raw.add_events, events, 'STI FOO')
raw.add_events(events, 'STI 014')
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, np.concatenate((events, orig_events)))
raw.add_events(events, 'STI 014', replace=True)
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, events)
def test_merge_events():
"""Test event merging."""
events_orig = [[1, 0, 1], [3, 0, 2], [10, 0, 3], [20, 0, 4]]
events_replacement = \
[[1, 0, 12],
[3, 0, 12],
[10, 0, 34],
[20, 0, 34]]
events_no_replacement = \
[[1, 0, 1],
[1, 0, 12],
[1, 0, 1234],
[3, 0, 2],
[3, 0, 12],
[3, 0, 1234],
[10, 0, 3],
[10, 0, 34],
[10, 0, 1234],
[20, 0, 4],
[20, 0, 34],
[20, 0, 1234]]
for replace_events, events_good in [(True, events_replacement),
(False, events_no_replacement)]:
events = merge_events(events_orig, [1, 2], 12, replace_events)
events = merge_events(events, [3, 4], 34, replace_events)
events = merge_events(events, [1, 2, 3, 4], 1234, replace_events)
assert_array_equal(events, events_good)
def test_io_events(tmpdir):
"""Test IO for events."""
# Test binary fif IO
events = read_events(fname) # Use as the gold standard
fname_temp = tmpdir.join('events-eve.fif')
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test binary fif.gz IO
events2 = read_events(fname_gz) # Use as the gold standard
assert_array_almost_equal(events, events2)
fname_temp += '.gz'
write_events(fname_temp, events2)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test new format text file IO
fname_temp = str(tmpdir.join('events.eve'))
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
with pytest.warns(RuntimeWarning, match='first row of'):
events2 = read_events(fname_txt_mpr, mask=0, mask_type='not_and')
assert_array_almost_equal(events, events2)
# Test old format text file IO
events2 = read_events(fname_old_txt)
assert_array_almost_equal(events, events2)
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test event selection
fname_temp = tmpdir.join('events-eve.fif')
a = read_events(fname_temp, include=1)
b = read_events(fname_temp, include=[1])
c = read_events(fname_temp, exclude=[2, 3, 4, 5, 32])
d = read_events(fname_temp, include=1, exclude=[2, 3])
assert_array_equal(a, b)
assert_array_equal(a, c)
assert_array_equal(a, d)
# test reading file with mask=None
events2 = events.copy()
events2[:, -1] = range(events2.shape[0])
write_events(fname_temp, events2)
events3 = read_events(fname_temp, mask=None)
assert_array_almost_equal(events2, events3)
# Test binary file IO for 1 event
events = read_events(fname_1) # Use as the new gold standard
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# Test text file IO for 1 event
fname_temp = str(tmpdir.join('events.eve'))
write_events(fname_temp, events)
events2 = read_events(fname_temp)
assert_array_almost_equal(events, events2)
# test warnings on bad filenames
fname2 = tmpdir.join('test-bad-name.fif')
with pytest.warns(RuntimeWarning, match='-eve.fif'):
write_events(fname2, events)
with pytest.warns(RuntimeWarning, match='-eve.fif'):
read_events(fname2)
# No event_id
with pytest.raises(RuntimeError, match='No event_id'):
read_events(fname, return_event_id=True)
def test_io_c_annot():
"""Test I/O of MNE-C -annot.fif files."""
raw = read_raw_fif(fname_raw)
sfreq, first_samp = raw.info['sfreq'], raw.first_samp
events = read_events(fname_c_annot)
events_2, event_id = read_events(fname_c_annot, return_event_id=True)
assert_array_equal(events_2, events)
expected = np.arange(2, 5) * sfreq + first_samp
assert_allclose(events[:, 0], expected, atol=3) # clicking accuracy (samp)
expected = {'Two sec': 1001, 'Three and four sec': 1002}
assert event_id == expected
def test_find_events():
"""Test find events in raw file."""
events = read_events(fname)
raw = read_raw_fif(raw_fname, preload=True)
# let's test the defaulting behavior while we're at it
extra_ends = ['', '_1']
orig_envs = [os.getenv('MNE_STIM_CHANNEL%s' % s) for s in extra_ends]
os.environ['MNE_STIM_CHANNEL'] = 'STI 014'
if 'MNE_STIM_CHANNEL_1' in os.environ:
del os.environ['MNE_STIM_CHANNEL_1']
events2 = find_events(raw)
assert_array_almost_equal(events, events2)
# now test with mask
events11 = find_events(raw, mask=3, mask_type='not_and')
with pytest.warns(RuntimeWarning, match='events masked'):
events22 = read_events(fname, mask=3, mask_type='not_and')
assert_array_equal(events11, events22)
# Reset some data for ease of comparison
raw._first_samps[0] = 0
raw.info['sfreq'] = 1000
stim_channel = 'STI 014'
stim_channel_idx = pick_channels(raw.info['ch_names'],
include=[stim_channel])
# test digital masking
raw._data[stim_channel_idx, :5] = np.arange(5)
raw._data[stim_channel_idx, 5:] = 0
# 1 == '0b1', 2 == '0b10', 3 == '0b11', 4 == '0b100'
pytest.raises(TypeError, find_events, raw, mask="0", mask_type='and')
pytest.raises(ValueError, find_events, raw, mask=0, mask_type='blah')
# testing mask_type. default = 'not_and'
assert_array_equal(find_events(raw, shortest_event=1, mask=1,
mask_type='not_and'),
[[2, 0, 2], [4, 2, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=2,
mask_type='not_and'),
[[1, 0, 1], [3, 0, 1], [4, 1, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=3,
mask_type='not_and'),
[[4, 0, 4]])
assert_array_equal(find_events(raw, shortest_event=1, mask=4,
mask_type='not_and'),
[[1, 0, 1], [2, 1, 2], [3, 2, 3]])
# testing with mask_type = 'and'
assert_array_equal(find_events(raw, shortest_event=1, mask=1,
mask_type='and'),
[[1, 0, 1], [3, 0, 1]])
assert_array_equal(find_events(raw, shortest_event=1, mask=2,
mask_type='and'),
[[2, 0, 2]])
assert_array_equal(find_events(raw, shortest_event=1, mask=3,
mask_type='and'),
[[1, 0, 1], [2, 1, 2], [3, 2, 3]])
assert_array_equal(find_events(raw, shortest_event=1, mask=4,
mask_type='and'),
[[4, 0, 4]])
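# Hedged aside: the assertions above are consistent with mask semantics of
# value & ~mask for mask_type='not_and' and value & mask for mask_type='and', e.g.:
# >>> [v & ~3 for v in (1, 2, 3, 4)]
# [0, 0, 0, 4]   # only the value-4 event survives mask=3 with 'not_and'
# >>> [v & 3 for v in (1, 2, 3, 4)]
# [1, 2, 3, 0]   # values 1..3 survive mask=3 with 'and'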
# test empty events channel
raw._data[stim_channel_idx, :] = 0
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, :4] = 1
assert_array_equal(find_events(raw), np.empty((0, 3), dtype='int32'))
raw._data[stim_channel_idx, -1:] = 9
assert_array_equal(find_events(raw), [[14399, 0, 9]])
# Test that we can handle consecutive events with no gap
raw._data[stim_channel_idx, 10:20] = 5
raw._data[stim_channel_idx, 20:30] = 6
raw._data[stim_channel_idx, 30:32] = 5
raw._data[stim_channel_idx, 40] = 6
assert_array_equal(find_events(raw, consecutive=False),
[[10, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=True),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw),
[[10, 0, 5],
[20, 5, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=False),
[[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, output='offset', consecutive=True),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5],
[40, 0, 6],
[14399, 0, 9]])
pytest.raises(ValueError, find_events, raw, output='step',
consecutive=True)
assert_array_equal(find_events(raw, output='step', consecutive=True,
shortest_event=1),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5],
[32, 5, 0],
[40, 0, 6],
[41, 6, 0],
[14399, 0, 9],
[14400, 9, 0]])
assert_array_equal(find_events(raw, output='offset'),
[[19, 6, 5],
[31, 0, 6],
[40, 0, 6],
[14399, 0, 9]])
assert_array_equal(find_events(raw, consecutive=False, min_duration=0.002),
[[10, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.002),
[[10, 0, 5],
[20, 5, 6],
[30, 6, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=False,
min_duration=0.002),
[[31, 0, 5]])
assert_array_equal(find_events(raw, output='offset', consecutive=True,
min_duration=0.002),
[[19, 6, 5],
[29, 5, 6],
[31, 0, 5]])
assert_array_equal(find_events(raw, consecutive=True, min_duration=0.003),
[[10, 0, 5],
[20, 5, 6]])
# test find_stim_steps merge parameter
raw._data[stim_channel_idx, :] = 0
raw._data[stim_channel_idx, 0] = 1
raw._data[stim_channel_idx, 10] = 4
raw._data[stim_channel_idx, 11:20] = 5
assert_array_equal(find_stim_steps(raw, pad_start=0, merge=0,
stim_channel=stim_channel),
[[0, 0, 1],
[1, 1, 0],
[10, 0, 4],
[11, 4, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=-1,
stim_channel=stim_channel),
[[1, 1, 0],
[10, 0, 5],
[20, 5, 0]])
assert_array_equal(find_stim_steps(raw, merge=1,
stim_channel=stim_channel),
[[1, 1, 0],
[11, 0, 5],
[20, 5, 0]])
# put back the env vars we trampled on
for s, o in zip(extra_ends, orig_envs):
if o is not None:
os.environ['MNE_STIM_CHANNEL%s' % s] = o
# Test with list of stim channels
raw._data[stim_channel_idx, 1:101] = np.zeros(100)
raw._data[stim_channel_idx, 10:11] = 1
raw._data[stim_channel_idx, 30:31] = 3
stim_channel2 = 'STI 015'
stim_channel2_idx = pick_channels(raw.info['ch_names'],
include=[stim_channel2])
raw._data[stim_channel2_idx, :] = 0
raw._data[stim_channel2_idx, :100] = raw._data[stim_channel_idx, 5:105]
events1 = find_events(raw, stim_channel='STI 014')
events2 = events1.copy()
events2[:, 0] -= 5
events = find_events(raw, stim_channel=['STI 014', stim_channel2])
assert_array_equal(events[::2], events2)
assert_array_equal(events[1::2], events1)
# test initial_event argument
info = create_info(['MYSTI'], 1000, 'stim')
data = np.zeros((1, 1000))
raw = RawArray(data, info)
data[0, :10] = 100
data[0, 30:40] = 200
assert_array_equal(find_events(raw, 'MYSTI'), [[30, 0, 200]])
assert_array_equal(find_events(raw, 'MYSTI', initial_event=True),
[[0, 0, 100], [30, 0, 200]])
# test error message for raw without stim channels
raw = read_raw_fif(raw_fname, preload=True)
raw.pick_types(meg=True, stim=False)
# raw does not have annotations
with pytest.raises(ValueError, match="'stim_channel'"):
find_events(raw)
# if raw has annotations, we show a different error message
raw.set_annotations(Annotations(0, 2, "test"))
with pytest.raises(ValueError, match="mne.events_from_annotations"):
find_events(raw)
def test_pick_events():
"""Test pick events in a events ndarray."""
events = np.array([[1, 0, 1],
[2, 1, 0],
[3, 0, 4],
[4, 4, 2],
[5, 2, 0]])
assert_array_equal(pick_events(events, include=[1, 4], exclude=4),
[[1, 0, 1],
[3, 0, 4]])
assert_array_equal(pick_events(events, exclude=[0, 2]),
[[1, 0, 1],
[3, 0, 4]])
assert_array_equal(pick_events(events, include=[1, 2], step=True),
[[1, 0, 1],
[2, 1, 0],
[4, 4, 2],
[5, 2, 0]])
def test_make_fixed_length_events():
"""Test making events of a fixed length."""
raw = read_raw_fif(raw_fname)
events = make_fixed_length_events(raw, id=1)
assert events.shape[1] == 3
events_zero = make_fixed_length_events(raw, 1, first_samp=False)
assert_equal(events_zero[0, 0], 0)
assert_array_equal(events_zero[:, 0], events[:, 0] - raw.first_samp)
# With limits
tmin, tmax = raw.times[[0, -1]]
duration = tmax - tmin
events = make_fixed_length_events(raw, 1, tmin, tmax, duration)
assert_equal(events.shape[0], 1)
# With bad limits (no resulting events)
pytest.raises(ValueError, make_fixed_length_events, raw, 1,
tmin, tmax - 1e-3, duration)
# not raw, bad id or duration
pytest.raises(TypeError, make_fixed_length_events, raw, 2.3)
pytest.raises(TypeError, make_fixed_length_events, 'not raw', 2)
pytest.raises(TypeError, make_fixed_length_events, raw, 23, tmin, tmax,
'abc')
# Let's try some ugly sample rate/sample count combos
data = np.random.RandomState(0).randn(1, 27768)
# This breaks unless np.round() is used in make_fixed_length_events
info = create_info(1, 155.4499969482422)
raw = RawArray(data, info)
events = make_fixed_length_events(raw, 1, duration=raw.times[-1])
assert events[0, 0] == 0
assert len(events) == 1
# Without use_rounding=True this breaks
raw = RawArray(data[:, :21216], info)
events = make_fixed_length_events(raw, 1, duration=raw.times[-1])
assert events[0, 0] == 0
assert len(events) == 1
# Make sure it gets used properly by compute_raw_covariance
cov = compute_raw_covariance(raw, tstep=None)
expected = np.cov(data[:, :21216])
assert_allclose(cov['data'], expected, atol=1e-12)
# overlaps
events = make_fixed_length_events(raw, 1, duration=1)
assert len(events) == 136
events_ol = make_fixed_length_events(raw, 1, duration=1, overlap=0.5)
assert len(events_ol) == 271
events_ol_2 = make_fixed_length_events(raw, 1, duration=1, overlap=0.9)
assert len(events_ol_2) == 1355
assert_array_equal(events_ol_2[:, 0], np.unique(events_ol_2[:, 0]))
with pytest.raises(ValueError, match='overlap must be'):
make_fixed_length_events(raw, 1, duration=1, overlap=1.1)
def test_define_events():
"""Test defining response events."""
events = read_events(fname)
raw = read_raw_fif(raw_fname)
events_, _ = define_target_events(events, 5, 32, raw.info['sfreq'],
.2, 0.7, 42, 99)
n_target = events[events[:, 2] == 5].shape[0]
n_miss = events_[events_[:, 2] == 99].shape[0]
n_target_ = events_[events_[:, 2] == 42].shape[0]
assert (n_target_ == (n_target - n_miss))
events = np.array([[0, 0, 1],
[375, 0, 2],
[500, 0, 1],
[875, 0, 3],
[1000, 0, 1],
[1375, 0, 3],
[1100, 0, 1],
[1475, 0, 2],
[1500, 0, 1],
[1875, 0, 2]])
true_lag_nofill = [1500., 1500., 1500.]
true_lag_fill = [1500., np.nan, np.nan, 1500., 1500.]
n, lag_nofill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5)
n, lag_fill = define_target_events(events, 1, 2, 250., 1.4, 1.6, 5, 99)
assert_array_equal(true_lag_fill, lag_fill)
assert_array_equal(true_lag_nofill, lag_nofill)
@testing.requires_testing_data
def test_acqparser():
"""Test AcqParserFIF."""
# no acquisition parameters
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': ''})
# invalid acquisition parameters
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': 'baaa'})
pytest.raises(ValueError, AcqParserFIF, {'acq_pars': 'ERFVersion\n1'})
# test oldish file
raw = read_raw_fif(raw_fname, preload=False)
acqp = AcqParserFIF(raw.info)
# test __repr__()
assert (repr(acqp))
# old file should trigger compat mode
assert (acqp.compat)
# count events and categories
assert_equal(len(acqp.categories), 6)
assert_equal(len(acqp._categories), 17)
assert_equal(len(acqp.events), 6)
assert_equal(len(acqp._events), 17)
# get category
assert (acqp['Surprise visual'])
# test TRIUX file
raw = read_raw_fif(fname_raw_elekta, preload=False)
acqp = raw.acqparser
assert (acqp is raw.acqparser) # same one, not regenerated
# test __repr__()
assert (repr(acqp))
# this file should not be in compatibility mode
assert (not acqp.compat)
# nonexistent category
pytest.raises(KeyError, acqp.__getitem__, 'does not exist')
pytest.raises(KeyError, acqp.get_condition, raw, 'foo')
# category not a string
pytest.raises(TypeError, acqp.__getitem__, 0)
# number of events / categories
assert_equal(len(acqp), 7)
assert_equal(len(acqp.categories), 7)
assert_equal(len(acqp._categories), 32)
assert_equal(len(acqp.events), 6)
assert_equal(len(acqp._events), 32)
# get category
assert (acqp['Test event 5'])
@testing.requires_testing_data
def test_acqparser_averaging():
"""Test averaging with AcqParserFIF vs. Elekta software."""
raw = read_raw_fif(fname_raw_elekta, preload=True)
acqp = AcqParserFIF(raw.info)
for cat in acqp.categories:
# XXX datasets match only when baseline is applied to both,
# not sure where relative dc shift comes from
cond = acqp.get_condition(raw, cat)
eps = Epochs(raw, baseline=(-.05, 0), **cond)
ev = eps.average()
ev_ref = read_evokeds(fname_ave_elekta, cat['comment'],
baseline=(-.05, 0), proj=False)
ev_mag = ev.copy()
ev_mag.pick_channels(['MEG0111'])
ev_grad = ev.copy()
ev_grad.pick_channels(['MEG2643', 'MEG1622'])
ev_ref_mag = ev_ref.copy()
ev_ref_mag.pick_channels(['MEG0111'])
ev_ref_grad = ev_ref.copy()
ev_ref_grad.pick_channels(['MEG2643', 'MEG1622'])
assert_allclose(ev_mag.data, ev_ref_mag.data,
rtol=0, atol=1e-15) # tol = 1 fT
# Elekta put these in a different order
assert ev_grad.ch_names[::-1] == ev_ref_grad.ch_names
assert_allclose(ev_grad.data[::-1], ev_ref_grad.data,
rtol=0, atol=1e-13) # tol = 1 fT/cm
def test_shift_time_events():
"""Test events latency shift by a given amount."""
events = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
EXPECTED = [1, 2, 3]
new_events = shift_time_events(events, ids=None, tshift=1, sfreq=1)
assert all(new_events[:, 0] == EXPECTED)
events = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
EXPECTED = [0, 2, 3]
new_events = shift_time_events(events, ids=[1, 2], tshift=1, sfreq=1)
assert all(new_events[:, 0] == EXPECTED)
| []
| []
| [
"MNE_STIM_CHANNEL_1",
"MNE_STIM_CHANNEL",
"MNE_STIM_CHANNEL%s' % "
]
| [] | ["MNE_STIM_CHANNEL_1", "MNE_STIM_CHANNEL", "MNE_STIM_CHANNEL%s' % "] | python | 3 | 0 | |
libnetwork/ipams/remote/remote_test.go | package remote
import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/pkg/plugins"
)
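// decodeToMap decodes a JSON request body into a generic map so the test
// handlers can inspect whichever fields they care about.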
func decodeToMap(r *http.Request) (res map[string]interface{}, err error) {
err = json.NewDecoder(r.Body).Decode(&res)
return
}
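// handle registers a mock handler for one IPAM driver method on the plugin's
// HTTP mux and writes its answer back as JSON.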
func handle(t *testing.T, mux *http.ServeMux, method string, h func(map[string]interface{}) interface{}) {
mux.HandleFunc(fmt.Sprintf("/%s.%s", ipamapi.PluginEndpointType, method), func(w http.ResponseWriter, r *http.Request) {
ask, err := decodeToMap(r)
if err != nil && err != io.EOF {
t.Fatal(err)
}
answer := h(ask)
err = json.NewEncoder(w).Encode(&answer)
if err != nil {
t.Fatal(err)
}
})
}
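// setupPlugin writes a plugin spec file pointing at a local test HTTP server
// and returns a cleanup function that removes the spec and stops the server.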
func setupPlugin(t *testing.T, name string, mux *http.ServeMux) func() {
specPath := "/etc/docker/plugins"
if runtime.GOOS == "windows" {
specPath = filepath.Join(os.Getenv("programdata"), "docker", "plugins")
}
if err := os.MkdirAll(specPath, 0755); err != nil {
t.Fatal(err)
}
defer func() {
if t.Failed() {
os.RemoveAll(specPath)
}
}()
server := httptest.NewServer(mux)
if server == nil {
t.Fatal("Failed to start an HTTP Server")
}
if err := os.WriteFile(filepath.Join(specPath, name+".spec"), []byte(server.URL), 0644); err != nil {
t.Fatal(err)
}
mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
fmt.Fprintf(w, `{"Implements": ["%s"]}`, ipamapi.PluginEndpointType)
})
return func() {
if err := os.RemoveAll(specPath); err != nil {
t.Fatal(err)
}
server.Close()
}
}
func TestGetCapabilities(t *testing.T) {
var plugin = "test-ipam-driver-capabilities"
mux := http.NewServeMux()
defer setupPlugin(t, plugin, mux)()
handle(t, mux, "GetCapabilities", func(msg map[string]interface{}) interface{} {
return map[string]interface{}{
"RequiresMACAddress": true,
}
})
p, err := plugins.Get(plugin, ipamapi.PluginEndpointType)
if err != nil {
t.Fatal(err)
}
client, err := getPluginClient(p)
if err != nil {
t.Fatal(err)
}
d := newAllocator(plugin, client)
caps, err := d.(*allocator).getCapabilities()
if err != nil {
t.Fatal(err)
}
if !caps.RequiresMACAddress || caps.RequiresRequestReplay {
t.Fatalf("Unexpected capability: %v", caps)
}
}
func TestGetCapabilitiesFromLegacyDriver(t *testing.T) {
var plugin = "test-ipam-legacy-driver"
mux := http.NewServeMux()
defer setupPlugin(t, plugin, mux)()
p, err := plugins.Get(plugin, ipamapi.PluginEndpointType)
if err != nil {
t.Fatal(err)
}
client, err := getPluginClient(p)
if err != nil {
t.Fatal(err)
}
d := newAllocator(plugin, client)
if _, err := d.(*allocator).getCapabilities(); err == nil {
t.Fatalf("Expected error, but got Success %v", err)
}
}
func TestGetDefaultAddressSpaces(t *testing.T) {
var plugin = "test-ipam-driver-addr-spaces"
mux := http.NewServeMux()
defer setupPlugin(t, plugin, mux)()
handle(t, mux, "GetDefaultAddressSpaces", func(msg map[string]interface{}) interface{} {
return map[string]interface{}{
"LocalDefaultAddressSpace": "white",
"GlobalDefaultAddressSpace": "blue",
}
})
p, err := plugins.Get(plugin, ipamapi.PluginEndpointType)
if err != nil {
t.Fatal(err)
}
client, err := getPluginClient(p)
if err != nil {
t.Fatal(err)
}
d := newAllocator(plugin, client)
l, g, err := d.(*allocator).GetDefaultAddressSpaces()
if err != nil {
t.Fatal(err)
}
if l != "white" || g != "blue" {
t.Fatalf("Unexpected default local and global address spaces: %s, %s", l, g)
}
}
func TestRemoteDriver(t *testing.T) {
var plugin = "test-ipam-driver"
mux := http.NewServeMux()
defer setupPlugin(t, plugin, mux)()
handle(t, mux, "GetDefaultAddressSpaces", func(msg map[string]interface{}) interface{} {
return map[string]interface{}{
"LocalDefaultAddressSpace": "white",
"GlobalDefaultAddressSpace": "blue",
}
})
handle(t, mux, "RequestPool", func(msg map[string]interface{}) interface{} {
as := "white"
if v, ok := msg["AddressSpace"]; ok && v.(string) != "" {
as = v.(string)
}
pl := "172.18.0.0/16"
sp := ""
if v, ok := msg["Pool"]; ok && v.(string) != "" {
pl = v.(string)
}
if v, ok := msg["SubPool"]; ok && v.(string) != "" {
sp = v.(string)
}
pid := fmt.Sprintf("%s/%s", as, pl)
if sp != "" {
pid = fmt.Sprintf("%s/%s", pid, sp)
}
return map[string]interface{}{
"PoolID": pid,
"Pool": pl,
"Data": map[string]string{"DNS": "8.8.8.8"},
}
})
handle(t, mux, "ReleasePool", func(msg map[string]interface{}) interface{} {
if _, ok := msg["PoolID"]; !ok {
t.Fatal("Missing PoolID in Release request")
}
return map[string]interface{}{}
})
handle(t, mux, "RequestAddress", func(msg map[string]interface{}) interface{} {
if _, ok := msg["PoolID"]; !ok {
t.Fatal("Missing PoolID in address request")
}
prefAddr := ""
if v, ok := msg["Address"]; ok {
prefAddr = v.(string)
}
ip := prefAddr
if ip == "" {
ip = "172.20.0.34"
}
ip = fmt.Sprintf("%s/16", ip)
return map[string]interface{}{
"Address": ip,
}
})
handle(t, mux, "ReleaseAddress", func(msg map[string]interface{}) interface{} {
if _, ok := msg["PoolID"]; !ok {
t.Fatal("Missing PoolID in address request")
}
if _, ok := msg["Address"]; !ok {
t.Fatal("Missing Address in release address request")
}
return map[string]interface{}{}
})
p, err := plugins.Get(plugin, ipamapi.PluginEndpointType)
if err != nil {
t.Fatal(err)
}
client, err := getPluginClient(p)
if err != nil {
t.Fatal(err)
}
d := newAllocator(plugin, client)
l, g, err := d.(*allocator).GetDefaultAddressSpaces()
if err != nil {
t.Fatal(err)
}
if l != "white" || g != "blue" {
t.Fatalf("Unexpected default local/global address spaces: %s, %s", l, g)
}
// Request any pool
poolID, pool, _, err := d.RequestPool("white", "", "", nil, false)
if err != nil {
t.Fatal(err)
}
if poolID != "white/172.18.0.0/16" {
t.Fatalf("Unexpected pool id: %s", poolID)
}
if pool == nil || pool.String() != "172.18.0.0/16" {
t.Fatalf("Unexpected pool: %s", pool)
}
// Request specific pool
poolID2, pool2, ops, err := d.RequestPool("white", "172.20.0.0/16", "", nil, false)
if err != nil {
t.Fatal(err)
}
if poolID2 != "white/172.20.0.0/16" {
t.Fatalf("Unexpected pool id: %s", poolID2)
}
if pool2 == nil || pool2.String() != "172.20.0.0/16" {
t.Fatalf("Unexpected pool: %s", pool2)
}
if dns, ok := ops["DNS"]; !ok || dns != "8.8.8.8" {
t.Fatal("Missing options")
}
// Request specific pool and subpool
poolID3, pool3, _, err := d.RequestPool("white", "172.20.0.0/16", "172.20.3.0/24" /*nil*/, map[string]string{"culo": "yes"}, false)
if err != nil {
t.Fatal(err)
}
if poolID3 != "white/172.20.0.0/16/172.20.3.0/24" {
t.Fatalf("Unexpected pool id: %s", poolID3)
}
if pool3 == nil || pool3.String() != "172.20.0.0/16" {
t.Fatalf("Unexpected pool: %s", pool3)
}
// Request any address
addr, _, err := d.RequestAddress(poolID2, nil, nil)
if err != nil {
t.Fatal(err)
}
if addr == nil || addr.String() != "172.20.0.34/16" {
t.Fatalf("Unexpected address: %s", addr)
}
// Request specific address
addr2, _, err := d.RequestAddress(poolID2, net.ParseIP("172.20.1.45"), nil)
if err != nil {
t.Fatal(err)
}
if addr2 == nil || addr2.String() != "172.20.1.45/16" {
t.Fatalf("Unexpected address: %s", addr2)
}
// Release address
err = d.ReleaseAddress(poolID, net.ParseIP("172.18.1.45"))
if err != nil {
t.Fatal(err)
}
}
| [
"\"programdata\""
]
| []
| [
"programdata"
]
| [] | ["programdata"] | go | 1 | 0 | |
cmd/server/main.go | package main
import (
"context"
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/tomwright/daselplayground/internal"
"github.com/tomwright/daselplayground/internal/storage"
"github.com/tomwright/lifetime"
"log"
"os"
"strings"
)
func main() {
var snippetStore storage.SnippetStore
if os.Getenv("MYSQL_HOST") == "" {
snippetStore = storage.NewInMemorySnippetStore()
} else {
db, err := mysqlConnect()
if err != nil {
log.Printf("could not connect to mysql: %s", err)
os.Exit(1)
}
if err := migrateUp(db); err != nil {
log.Printf("could not migrate up: %s", err)
os.Exit(1)
}
snippetStore = storage.NewMySQLSnippetStore(db)
}
executor := internal.NewExecutor(snippetStore)
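	// DASEL_BUILDS is assumed to be a comma-separated list of version:path pairs,
	// e.g. (illustrative values only) DASEL_BUILDS="master:/builds/dasel_master,v1.13.2:/builds/dasel_v1.13.2".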
for _, build := range strings.Split(os.Getenv("DASEL_BUILDS"), ",") {
split := strings.Split(build, ":")
executor.RegisterVersion(&internal.VersionOpts{
Version: split[0],
Path: split[1],
})
}
httpService := internal.NewHTTPService(os.Getenv("HTTP_LISTEN_ADDRESS"), executor, snippetStore)
lt := lifetime.New(context.Background()).Init()
// Start the http service.
lt.Start(httpService)
// Wait for all routines to stop running.
lt.Wait()
}
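// mysqlConnect opens and pings a MySQL connection built from the MYSQL_*
// environment variables.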
func mysqlConnect() (*sql.DB, error) {
dsn := fmt.Sprintf(
"%s:%s@tcp(%s:%s)/%s?parseTime=true&multiStatements=true",
os.Getenv("MYSQL_USERNAME"),
os.Getenv("MYSQL_PASSWORD"),
os.Getenv("MYSQL_HOST"),
os.Getenv("MYSQL_PORT"),
os.Getenv("MYSQL_DATABASE"),
)
db, err := sql.Open("mysql", dsn)
if err != nil {
return nil, fmt.Errorf("mysql open failed: %w", err)
}
if err := db.Ping(); err != nil {
return nil, fmt.Errorf("mysql ping failed: %w", err)
}
return db, nil
}
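// migrateUp applies any pending database migrations found under
// MIGRATIONS_PATH (defaulting to "migrations").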
func migrateUp(db *sql.DB) error {
migrationsPath := os.Getenv("MIGRATIONS_PATH")
if migrationsPath == "" {
migrationsPath = "migrations"
}
driver, err := mysql.WithInstance(db, &mysql.Config{})
if err != nil {
return fmt.Errorf("could not get driver instance: %w", err)
}
m, err := migrate.NewWithDatabaseInstance("file://"+migrationsPath, "mysql", driver)
if err != nil {
return fmt.Errorf("could not get migrate instance: %w", err)
}
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return fmt.Errorf("migrate up failed: %w", err)
}
}
return nil
}
| [
"\"MYSQL_HOST\"",
"\"DASEL_BUILDS\"",
"\"HTTP_LISTEN_ADDRESS\"",
"\"MYSQL_USERNAME\"",
"\"MYSQL_PASSWORD\"",
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_DATABASE\"",
"\"MIGRATIONS_PATH\""
]
| []
| [
"DASEL_BUILDS",
"MYSQL_PASSWORD",
"MIGRATIONS_PATH",
"MYSQL_PORT",
"MYSQL_USERNAME",
"HTTP_LISTEN_ADDRESS",
"MYSQL_DATABASE",
"MYSQL_HOST"
]
| [] | ["DASEL_BUILDS", "MYSQL_PASSWORD", "MIGRATIONS_PATH", "MYSQL_PORT", "MYSQL_USERNAME", "HTTP_LISTEN_ADDRESS", "MYSQL_DATABASE", "MYSQL_HOST"] | go | 8 | 0 | |
integration-cli/request/request.go | package request
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"strings"
"time"
dclient "github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/testutil"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
)
// Method creates a modifier that sets the specified string as the request method
func Method(method string) func(*http.Request) error {
return func(req *http.Request) error {
req.Method = method
return nil
}
}
// RawString sets the specified string as body for the request
func RawString(content string) func(*http.Request) error {
return RawContent(ioutil.NopCloser(strings.NewReader(content)))
}
// RawContent sets the specified reader as body for the request
func RawContent(reader io.ReadCloser) func(*http.Request) error {
return func(req *http.Request) error {
req.Body = reader
return nil
}
}
// ContentType sets the specified Content-Type request header
func ContentType(contentType string) func(*http.Request) error {
return func(req *http.Request) error {
req.Header.Set("Content-Type", contentType)
return nil
}
}
// JSON sets the Content-Type request header to json
func JSON(req *http.Request) error {
return ContentType("application/json")(req)
}
// JSONBody creates a modifier that encodes the specified data to a JSON string and sets it as the request body. It also sets
// the Content-Type header of the request.
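// A minimal usage sketch (illustrative only; "config" is a placeholder for any
// JSON-serializable value supplied by the caller):
//
//	resp, body, err := request.Post("/containers/create", request.JSONBody(config))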
func JSONBody(data interface{}) func(*http.Request) error {
return func(req *http.Request) error {
jsonData := bytes.NewBuffer(nil)
if err := json.NewEncoder(jsonData).Encode(data); err != nil {
return err
}
req.Body = ioutil.NopCloser(jsonData)
req.Header.Set("Content-Type", "application/json")
return nil
}
}
// Post creates and executes a POST request on the specified host and endpoint, with the specified request modifiers
func Post(endpoint string, modifiers ...func(*http.Request) error) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodPost))...)
}
// Delete creates and executes a DELETE request on the specified host and endpoint, with the specified request modifiers
func Delete(endpoint string, modifiers ...func(*http.Request) error) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, append(modifiers, Method(http.MethodDelete))...)
}
// Get creates and executes a GET request on the specified host and endpoint, with the specified request modifiers
func Get(endpoint string, modifiers ...func(*http.Request) error) (*http.Response, io.ReadCloser, error) {
return Do(endpoint, modifiers...)
}
// Do creates and executes a request on the specified endpoint, with the specified request modifiers
func Do(endpoint string, modifiers ...func(*http.Request) error) (*http.Response, io.ReadCloser, error) {
return DoOnHost(DaemonHost(), endpoint, modifiers...)
}
// DoOnHost creates and executes a request on the specified host and endpoint, with the specified request modifiers
func DoOnHost(host, endpoint string, modifiers ...func(*http.Request) error) (*http.Response, io.ReadCloser, error) {
req, err := New(host, endpoint, modifiers...)
if err != nil {
return nil, nil, err
}
client, err := NewHTTPClient(host)
if err != nil {
return nil, nil, err
}
resp, err := client.Do(req)
var body io.ReadCloser
if resp != nil {
body = ioutils.NewReadCloserWrapper(resp.Body, func() error {
defer resp.Body.Close()
return nil
})
}
return resp, body, err
}
// New creates a new http Request to the specified host and endpoint, with the specified request modifiers
func New(host, endpoint string, modifiers ...func(*http.Request) error) (*http.Request, error) {
_, addr, _, err := dclient.ParseHost(host)
	if err != nil {
		return nil, errors.Wrapf(err, "could not parse url %q", host)
	}
req, err := http.NewRequest("GET", endpoint, nil)
if err != nil {
return nil, fmt.Errorf("could not create new request: %v", err)
}
req.URL.Scheme = "http"
req.URL.Host = addr
for _, config := range modifiers {
if err := config(req); err != nil {
return nil, err
}
}
return req, nil
}
// NewHTTPClient creates an http client for the specific host
func NewHTTPClient(host string) (*http.Client, error) {
// FIXME(vdemeester) 10*time.Second timeout of SockRequest… ?
proto, addr, _, err := dclient.ParseHost(host)
if err != nil {
return nil, err
}
transport := new(http.Transport)
if proto == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
transport = &http.Transport{TLSClientConfig: tlsConfig}
}
transport.DisableKeepAlives = true
err = sockets.ConfigureTransport(transport, proto, addr)
return &http.Client{
Transport: transport,
}, err
}
// NewClient returns a new Docker API client
func NewClient() (dclient.APIClient, error) {
host := DaemonHost()
httpClient, err := NewHTTPClient(host)
if err != nil {
return nil, err
}
return dclient.NewClient(host, "", httpClient, nil)
}
// FIXME(vdemeester) httputil.ClientConn is deprecated, use http.Client instead (closer to actual client)
// Deprecated: Use New instead of NewRequestClient
// Deprecated: use request.Do (or Get, Delete, Post) instead
func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string, modifiers ...func(*http.Request)) (*http.Request, *httputil.ClientConn, error) {
c, err := SockConn(time.Duration(10*time.Second), daemon)
if err != nil {
return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
}
client := httputil.NewClientConn(c, nil)
req, err := http.NewRequest(method, endpoint, data)
if err != nil {
client.Close()
return nil, nil, fmt.Errorf("could not create new request: %v", err)
}
for _, opt := range modifiers {
opt(req)
}
if ct != "" {
req.Header.Set("Content-Type", ct)
}
return req, client, nil
}
// SockRequest creates a request against the specified host (with method, endpoint and other request modifiers) and
// returns the status code and the content as a byte slice
// Deprecated: use request.Do instead
func SockRequest(method, endpoint string, data interface{}, daemon string, modifiers ...func(*http.Request)) (int, []byte, error) {
jsonData := bytes.NewBuffer(nil)
if err := json.NewEncoder(jsonData).Encode(data); err != nil {
return -1, nil, err
}
res, body, err := SockRequestRaw(method, endpoint, jsonData, "application/json", daemon, modifiers...)
if err != nil {
return -1, nil, err
}
b, err := testutil.ReadBody(body)
return res.StatusCode, b, err
}
// SockRequestRaw creates a request against the specified host (with method, endpoint and other request modifiers) and
// returns the http response and the output as an io.ReadCloser
// Deprecated: use request.Do (or Get, Delete, Post) instead
func SockRequestRaw(method, endpoint string, data io.Reader, ct, daemon string, modifiers ...func(*http.Request)) (*http.Response, io.ReadCloser, error) {
req, client, err := newRequestClient(method, endpoint, data, ct, daemon, modifiers...)
if err != nil {
return nil, nil, err
}
resp, err := client.Do(req)
if err != nil {
client.Close()
return resp, nil, err
}
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
defer resp.Body.Close()
return client.Close()
})
return resp, body, err
}
// SockRequestHijack creates a connection to the specified host (with method, content type, …) and returns a hijacked connection
// and the output as a `bufio.Reader`
func SockRequestHijack(method, endpoint string, data io.Reader, ct string, daemon string, modifiers ...func(*http.Request)) (net.Conn, *bufio.Reader, error) {
req, client, err := newRequestClient(method, endpoint, data, ct, daemon, modifiers...)
if err != nil {
return nil, nil, err
}
client.Do(req)
conn, br := client.Hijack()
return conn, br, nil
}
// SockConn opens a connection on the specified socket
func SockConn(timeout time.Duration, daemon string) (net.Conn, error) {
daemonURL, err := url.Parse(daemon)
if err != nil {
return nil, errors.Wrapf(err, "could not parse url %q", daemon)
}
var c net.Conn
switch daemonURL.Scheme {
case "npipe":
return npipeDial(daemonURL.Path, timeout)
case "unix":
return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout)
case "tcp":
if os.Getenv("DOCKER_TLS_VERIFY") != "" {
// Setup the socket TLS configuration.
tlsConfig, err := getTLSConfig()
if err != nil {
return nil, err
}
dialer := &net.Dialer{Timeout: timeout}
return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig)
}
return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout)
default:
return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
}
}
func getTLSConfig() (*tls.Config, error) {
dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
}
option := &tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
return tlsConfig, nil
}
// DaemonHost returns the daemon host string for this test execution
func DaemonHost() string {
daemonURLStr := "unix://" + opts.DefaultUnixSocket
if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
daemonURLStr = daemonHostVar
}
return daemonURLStr
}
| [
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_CERT_PATH\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
]
| [] | ["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"] | go | 3 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zuessite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go | // +build windows,!dockerless
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"os"
"runtime"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerfilters "github.com/docker/docker/api/types/filters"
"k8s.io/klog/v2"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
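// DefaultMemorySwap returns 0, as memory swap limits are not applied on Windows.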
func DefaultMemorySwap() int64 {
return 0
}
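// getSecurityOpts returns no security options on Windows; seccomp profiles are
// not supported there, so a non-empty profile only triggers a warning.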
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
if seccompProfile != "" {
klog.Warningf("seccomp annotations are not supported on windows")
}
return nil, nil
}
// applyExperimentalCreateConfig applies experimental configuration from sandbox annotations.
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
if kubeletapis.ShouldIsolatedByHyperV(annotations) {
createConfig.HostConfig.Isolation = kubeletapis.HypervIsolationValue
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
}
}
}
func (ds *dockerService) updateCreateConfig(
createConfig *dockertypes.ContainerCreateConfig,
config *runtimeapi.ContainerConfig,
sandboxConfig *runtimeapi.PodSandboxConfig,
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode != "" {
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode)
} else if !kubeletapis.ShouldIsolatedByHyperV(sandboxConfig.Annotations) {
		// TODO: Refactor this in the future to call the methods in security_context.go directly
modifyHostOptionsForContainer(nil, podSandboxID, createConfig.HostConfig)
}
// Apply Windows-specific options if applicable.
if wc := config.GetWindows(); wc != nil {
rOpts := wc.GetResources()
if rOpts != nil {
// Precedence and units for these are described at length in kuberuntime_container_windows.go - generateWindowsContainerConfig()
createConfig.HostConfig.Resources = dockercontainer.Resources{
Memory: rOpts.MemoryLimitInBytes,
CPUShares: rOpts.CpuShares,
CPUCount: rOpts.CpuCount,
NanoCPUs: rOpts.CpuMaximum * int64(runtime.NumCPU()) * (1e9 / 10000),
}
}
// Apply security context.
applyWindowsContainerSecurityContext(wc.GetSecurityContext(), createConfig.Config, createConfig.HostConfig)
}
applyExperimentalCreateConfig(createConfig, sandboxConfig.Annotations)
return nil
}
// applyWindowsContainerSecurityContext updates docker container options according to security context.
func applyWindowsContainerSecurityContext(wsc *runtimeapi.WindowsContainerSecurityContext, config *dockercontainer.Config, hc *dockercontainer.HostConfig) {
if wsc == nil {
return
}
if wsc.GetRunAsUsername() != "" {
config.User = wsc.GetRunAsUsername()
}
}
func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) []string {
opts := dockertypes.ContainerListOptions{
All: true,
Filters: dockerfilters.NewArgs(),
}
f := newDockerFilter(&opts.Filters)
f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer)
f.AddLabel(sandboxIDLabelKey, sandboxID)
containers, err := ds.client.ListContainers(opts)
if err != nil {
return nil
}
for _, c := range containers {
r, err := ds.client.InspectContainer(c.ID)
if err != nil {
continue
}
// Versions and feature support
// ============================
		// Windows version == Windows Server, Version 1709: supports both the sandbox and non-sandbox cases
		// Windows version == Windows Server 2016: supports only the non-sandbox case
		// Windows version < Windows Server 2016: not supported
		// Sandbox support in Windows mandates a CNI plugin.
		// Presence of the CONTAINER_NETWORK flag is treated as the non-sandbox case here
// Todo: Add a kernel version check for more validation
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
// On Windows, every container that is created in a Sandbox, needs to invoke CNI plugin again for adding the Network,
// with the shared container name as NetNS info,
// This is passed down to the platform to replicate some necessary information to the new container
//
// This place is chosen as a hack for now, since ds.getIP would end up calling CNI's addToNetwork
// That is why addToNetwork is required to be idempotent
// Instead of relying on this call, an explicit call to addToNetwork should be
// done immediately after ContainerCreation, in case of Windows only. TBD Issue # to handle this
if r.HostConfig.Isolation == kubeletapis.HypervIsolationValue {
// Hyper-V only supports one container per Pod yet and the container will have a different
// IP address from sandbox. Return the first non-sandbox container IP as POD IP.
// TODO(feiskyer): remove this workaround after Hyper-V supports multiple containers per Pod.
if containerIPs := ds.getIPs(c.ID, r); len(containerIPs) != 0 {
return containerIPs
}
} else {
// Do not return any IP, so that we would continue and get the IP of the Sandbox.
// Windows 1709 and 1803 doesn't have the Namespace support, so getIP() is called
// to replicate the DNS registry key to the Workload container (IP/Gateway/MAC is
// set separately than DNS).
// TODO(feiskyer): remove this workaround after Namespace is supported in Windows RS5.
ds.getIPs(sandboxID, r)
}
} else {
// ds.getIP will call the CNI plugin to fetch the IP
if containerIPs := ds.getIPs(c.ID, r); len(containerIPs) != 0 {
return containerIPs
}
}
}
return nil
}
func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
	// Currently on Windows there is no identifier exposed for the network namespace.
	// Like docker, the referenced container id is used to figure out the network namespace id internally by the platform,
	// so the docker networkMode (which holds container:<ref containerid>) is returned for the network namespace here.
return string(c.HostConfig.NetworkMode), nil
}
| [
"\"CONTAINER_NETWORK\"",
"\"CONTAINER_NETWORK\"",
"\"CONTAINER_NETWORK\""
]
| []
| [
"CONTAINER_NETWORK"
]
| [] | ["CONTAINER_NETWORK"] | go | 1 | 0 | |
test/test_extra.py | # -*- coding: utf-8 -*-
#
# Extra tests which are not part of the SHT or DASH test suites,
# nor the discrete issues tests or the cmdline_test file.
# The need for these tests is discovered by doing coverage checks and these
# are added as required.
import os
import re
from rdflib import Graph
from pyshacl import validate
from pyshacl.errors import ReportableRuntimeError
ontology_file_text = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
<http://example.com/exOnt> a owl:Ontology ;
rdfs:label "An example extra-ontology file."@en .
exOnt:Animal a rdfs:Class ;
rdfs:comment "The parent class for Humans and Pets"@en ;
rdfs:subClassOf owl:Thing .
exOnt:Human a rdfs:Class ;
rdfs:comment "A Human being"@en ;
rdfs:subClassOf exOnt:Animal .
exOnt:Pet a rdfs:Class ;
rdfs:comment "An animal owned by a human"@en ;
rdfs:subClassOf exOnt:Animal .
exOnt:hasPet a rdf:Property ;
rdfs:domain exOnt:Human ;
rdfs:range exOnt:Pet .
exOnt:nlegs a rdf:Property ;
rdfs:domain exOnt:Animal ;
rdfs:range xsd:integer .
exOnt:Teacher a rdfs:Class ;
rdfs:comment "A Human who is a teacher."@en ;
rdfs:subClassOf exOnt:Human .
exOnt:PreschoolTeacher a rdfs:Class ;
rdfs:comment "A Teacher who teaches preschool."@en ;
rdfs:subClassOf exOnt:Teacher .
exOnt:Lizard a rdfs:Class ;
rdfs:subClassOf exOnt:Pet .
exOnt:Goanna a rdfs:Class ;
rdfs:subClassOf exOnt:Lizard .
"""
shacl_file_text = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exShape: <http://example.com/exShape#> .
@prefix exOnt: <http://example.com/exOnt#> .
<http://example.com/exShape> a owl:Ontology ;
rdfs:label "Example Shapes File"@en .
exShape:HumanShape a sh:NodeShape ;
sh:property [
sh:class exOnt:Pet ;
sh:path exOnt:hasPet ;
] ;
sh:property [
sh:datatype xsd:integer ;
sh:path exOnt:nLegs ;
sh:maxInclusive 2 ;
sh:minInclusive 2 ;
] ;
sh:targetClass exOnt:Human .
exShape:AnimalShape a sh:NodeShape ;
sh:property [
sh:datatype xsd:integer ;
sh:path exOnt:nLegs ;
sh:maxInclusive 4 ;
sh:minInclusive 1 ;
] ;
sh:targetClass exOnt:Animal .
"""
data_file_text = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Human1 rdf:type exOnt:PreschoolTeacher ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet ex:Pet1 .
ex:Pet1 rdf:type exOnt:Goanna ;
rdf:label "Sebastian" ;
exOnt:nLegs "4"^^xsd:integer .
"""
data_file_text_bad = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Human1 rdf:type exOnt:PreschoolTeacher ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet "Sebastian"^^xsd:string .
ex:Pet1 rdf:type exOnt:Goanna ;
rdf:label "Sebastian" ;
exOnt:nLegs "four"^^xsd:string .
"""
def test_validate_with_ontology():
g = Graph().parse(data=data_file_text, format='turtle')
e = Graph().parse(data=ontology_file_text, format='turtle')
g_len = len(g)
res = validate(g, shacl_graph=shacl_file_text,
shacl_graph_format='turtle',
ont_graph=e, inference='both', debug=True)
conforms, graph, string = res
g_len2 = len(g)
assert conforms
assert g_len2 == g_len
def test_validate_with_ontology_inplace():
g = Graph().parse(data=data_file_text, format='turtle')
e = Graph().parse(data=ontology_file_text, format='turtle')
g_len = len(g)
res = validate(g, shacl_graph=shacl_file_text,
shacl_graph_format='turtle',
ont_graph=e, inference='both', debug=True, inplace=True)
conforms, graph, string = res
g_len2 = len(g)
assert conforms
assert g_len2 != g_len
def test_validate_with_ontology_fail1():
res = validate(data_file_text_bad, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
ont_graph=ontology_file_text, ont_graph_format="turtle",
inference='both', debug=True)
conforms, graph, string = res
assert not conforms
def test_validate_with_ontology_fail2():
res = validate(data_file_text_bad, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
ont_graph=ontology_file_text, ont_graph_format="turtle",
inference=None, debug=True)
conforms, graph, string = res
assert conforms
def test_metashacl_pass():
res = validate(data_file_text, shacl_graph=shacl_file_text,
meta_shacl=True, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=ontology_file_text,
ont_graph_format="turtle", inference='both', debug=True)
conforms, graph, string = res
assert conforms
def test_metashacl_fail():
bad_shacl_text = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ex: <http://example.com/ex#> .
ex:HumanShape a sh:NodeShape ;
sh:property [
sh:class ex:Pet ;
sh:path "2"^^xsd:integer ;
] ;
sh:property [
sh:datatype xsd:integer ;
sh:path ex:nLegs ;
sh:maxInclusive 2 ;
sh:minInclusive 2 ;
] ;
sh:targetClass ex:Human .
ex:AnimalShape a sh:NodeShape ;
sh:property [
sh:datatype xsd:integer ;
sh:path ex:nLegs ;
sh:maxInclusive 4 ;
sh:minInclusive 1 ;
] ;
sh:targetClass ex:Animal .
"""
did_error = False
try:
res = validate(data_file_text, shacl_graph=bad_shacl_text,
meta_shacl=True, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=ontology_file_text,
ont_graph_format="turtle", inference='both', debug=True)
conforms, graph, string = res
assert not conforms
except ReportableRuntimeError as r:
assert "Shacl Shapes Shacl file" in r.message
did_error = True
assert did_error
data_file_text_bn = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Student1 exOnt:hasTeacher [
rdf:type exOnt:PreschoolTeacher ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet ex:Pet1 ]
.
ex:Pet1 rdf:type exOnt:Goanna ;
rdf:label "Sebastian" ;
exOnt:nLegs "4"^^xsd:integer .
"""
data_file_text_bad_bn = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Student1 exOnt:hasTeacher [
rdf:type exOnt:PreschoolTeacher ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet "Sebastian"^^xsd:string ]
.
ex:Pet1 rdf:type exOnt:Goanna ;
rdf:label "Sebastian" ;
exOnt:nLegs "four"^^xsd:string .
"""
def test_blank_node_string_generation():
res = validate(data_file_text_bad_bn, shacl_graph=shacl_file_text,
data_graph_format='turtle', shacl_graph_format='turtle',
ont_graph=ontology_file_text, ont_graph_format="turtle",
inference='rdfs', debug=True)
conforms, graph, string = res
assert not conforms
rx = r"^\s*Focus Node\:\s+\[.+rdf:type\s+.+exOnt\:PreschoolTeacher.*\]$"
matches = re.search(rx, string, flags=re.MULTILINE)
assert matches
def test_serialize_report_graph():
res = validate(data_file_text, shacl_graph=shacl_file_text,
data_graph_format='turtle', serialize_report_graph=True,
shacl_graph_format='turtle', ont_graph=ontology_file_text,
ont_graph_format="turtle", inference='both', debug=True)
conforms, graph, string = res
assert isinstance(graph, (str, bytes))
shacl_file_property_shapes_text = """\
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exShape: <http://example.com/exShape#> .
@prefix exOnt: <http://example.com/exOnt#> .
<http://example.com/exShape> a owl:Ontology ;
rdfs:label "Example Shapes File"@en .
exShape:HumanHasPetShape a sh:PropertyShape ;
sh:class exOnt:Pet ;
sh:path exOnt:hasPet ;
sh:targetClass exOnt:Human .
exShape:HumanHasLegsShape a sh:PropertyShape ;
sh:datatype xsd:integer ;
sh:path exOnt:nLegs ;
sh:maxInclusive 2 ;
sh:minInclusive 2 ;
sh:targetClass exOnt:Human .
exShape:PetHasLegsShape a sh:PropertyShape ;
sh:datatype xsd:integer ;
sh:path exOnt:nLegs ;
sh:maxInclusive 4 ;
sh:minInclusive 1 ;
sh:targetClass exOnt:Animal .
"""
def test_property_shape_focus():
res = validate(data_file_text, shacl_graph=shacl_file_property_shapes_text,
data_graph_format='turtle', shacl_graph_format='turtle',
ont_graph=ontology_file_text, ont_graph_format="turtle",
inference='rdfs', debug=True)
conforms, graph, string = res
assert conforms
def test_property_shape_focus_fail1():
res = validate(data_file_text_bad, shacl_graph=shacl_file_property_shapes_text,
data_graph_format='turtle', shacl_graph_format='turtle',
ont_graph=ontology_file_text, ont_graph_format="turtle",
inference='rdfs', debug=True)
conforms, graph, string = res
assert not conforms
web_d1_ttl = """\
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Human1 rdf:type exOnt:Human ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet ex:Pet1 .
ex:Pet1 rdf:type exOnt:Lizard ;
rdf:label "Sebastian" ;
exOnt:nLegs "4"^^xsd:integer .
"""
web_d2_ttl = """\
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix exOnt: <http://example.com/exOnt#> .
@prefix ex: <http://example.com/ex#> .
ex:Human1 rdf:type exOnt:Human ;
rdf:label "Amy" ;
exOnt:nLegs "2"^^xsd:integer ;
exOnt:hasPet "Sebastian"^^xsd:string .
ex:Pet1 rdf:type exOnt:Lizard ;
rdf:label "Sebastian" ;
exOnt:nLegs "g"^^xsd:string .
"""
def test_web_retrieve():
DEB_BUILD_ARCH = os.environ.get('DEB_BUILD_ARCH', None)
DEB_HOST_ARCH = os.environ.get('DEB_HOST_ARCH', None)
if DEB_BUILD_ARCH is not None or DEB_HOST_ARCH is not None:
print("Cannot run web requests in debhelper tests.")
assert True
return True
shacl_file = "https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/s1.ttl"
ont_file = "https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/o1.ttl"
res = validate(web_d1_ttl, shacl_graph=shacl_file, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=ont_file,
ont_graph_format="turtle", inference='both', debug=True)
conforms, graph, string = res
assert conforms
def test_web_retrieve_fail():
DEB_BUILD_ARCH = os.environ.get('DEB_BUILD_ARCH', None)
DEB_HOST_ARCH = os.environ.get('DEB_HOST_ARCH', None)
if DEB_BUILD_ARCH is not None or DEB_HOST_ARCH is not None:
print("Cannot run web requests in debhelper tests.")
assert True
return True
shacl_file = "https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/s1.ttl"
ont_file = "https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/o1.ttl"
res = validate(web_d2_ttl, shacl_graph=shacl_file, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=ont_file,
ont_graph_format="turtle", inference='both', debug=True)
conforms, graph, string = res
assert not conforms
my_partial_shapes_text = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix ex: <http://example.com/ex1#> .
<http://example.com/ex1> a owl:Ontology ;
owl:imports <https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/s1.ttl> .
"""
my_partial_ont_text = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix ex: <http://example.com/ex2#> .
<http://example.com/ex2> a owl:Ontology ;
owl:imports <https://raw.githubusercontent.com/RDFLib/pySHACL/master/test/resources/cmdline_tests/o1.ttl> .
"""
def test_owl_imports():
DEB_BUILD_ARCH = os.environ.get('DEB_BUILD_ARCH', None)
DEB_HOST_ARCH = os.environ.get('DEB_HOST_ARCH', None)
if DEB_BUILD_ARCH is not None or DEB_HOST_ARCH is not None:
print("Cannot run owl:imports in debhelper tests.")
assert True
return True
res = validate(web_d1_ttl, shacl_graph=my_partial_shapes_text, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=my_partial_ont_text,
ont_graph_format="turtle", inference='both', debug=True, do_owl_imports=True)
conforms, graph, string = res
print(string)
assert conforms
def test_owl_imports_fail():
DEB_BUILD_ARCH = os.environ.get('DEB_BUILD_ARCH', None)
DEB_HOST_ARCH = os.environ.get('DEB_HOST_ARCH', None)
if DEB_BUILD_ARCH is not None or DEB_HOST_ARCH is not None:
print("Cannot run owl:imports in debhelper tests.")
assert True
return True
res = validate(web_d2_ttl, shacl_graph=my_partial_shapes_text, data_graph_format='turtle',
shacl_graph_format='turtle', ont_graph=my_partial_ont_text,
ont_graph_format="turtle", inference='both', debug=True, do_owl_imports=True)
conforms, graph, string = res
print(string)
assert not conforms
def test_sparql_message_subst():
df = '''@prefix ex: <http://datashapes.org/sh/tests/#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ex:ValidResource1
rdf:type rdfs:Resource ;
.
ex:InvalidResource1
rdf:type rdfs:Resource ;
rdfs:label "Invalid resource 1" ;
.
ex:InvalidResource2
rdf:type rdfs:Resource ;
rdfs:label "Invalid label 1" ;
rdfs:label "Invalid label 2" ;
.
ex:TestShape
rdf:type sh:NodeShape ;
rdfs:label "Test shape" ;
sh:sparql ex:TestShape-sparql ;
sh:targetNode ex:InvalidResource1 ;
sh:targetNode ex:InvalidResource2 ;
sh:targetNode ex:ValidResource1 ;
.
ex:TestShape-sparql
sh:message "{$this} cannot have a {$path} of {$value}" ;
sh:prefixes <http://datashapes.org/sh/tests/sparql/node/sparql-001.test> ;
sh:select """
SELECT $this ?path ?value
WHERE {
$this ?path ?value .
FILTER (?path = <http://www.w3.org/2000/01/rdf-schema#label>) .
}""" ;
.'''
res = validate(df, data_graph_format='turtle', inference=None, debug=True,)
conforms, graph, s = res
assert "#InvalidResource1 cannot have a http://www.w3.org/2000/01/rdf-schema#label of Invalid resource 1" in s
assert "#InvalidResource2 cannot have a http://www.w3.org/2000/01/rdf-schema#label of Invalid label 1" in s
assert "#InvalidResource2 cannot have a http://www.w3.org/2000/01/rdf-schema#label of Invalid label 2" in s
assert not conforms
if __name__ == "__main__":
test_validate_with_ontology()
test_validate_with_ontology_fail1()
test_validate_with_ontology_fail2()
test_metashacl_pass()
test_metashacl_fail()
test_blank_node_string_generation()
test_property_shape_focus()
test_property_shape_focus_fail1()
test_web_retrieve()
test_serialize_report_graph()
test_owl_imports()
test_owl_imports_fail()
test_sparql_message_subst()
| []
| []
| [
"DEB_HOST_ARCH",
"DEB_BUILD_ARCH"
]
| [] | ["DEB_HOST_ARCH", "DEB_BUILD_ARCH"] | python | 2 | 0 | |
test/net/sectorsoftware/ygo/deck/DeckSetTest.java | package net.sectorsoftware.ygo.deck;
import static org.junit.Assert.assertEquals;
import java.io.Closeable;
import java.io.IOException;
import java.lang.RuntimeException;
import java.util.Map;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import net.sectorsoftware.ygo.data.DataTypes;
import net.sectorsoftware.ygo.data.DataTypes.DeckType;
import net.sectorsoftware.ygo.data.DataTypes.StaticCardData;
import net.sectorsoftware.ygo.deck.DataTypes.DeckError;
public class DeckSetTest
{
static {
System.loadLibrary("ygodeck-jni");
}
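    // These tests resolve the card database from the CARD_DB_PATH environment
    // variable, e.g. (hypothetical path) CARD_DB_PATH=/opt/ygo/card.db.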
static class DeckSet_Fixture implements Closeable
{
public DeckSet_Fixture()
{
DB.setPath(System.getenv("CARD_DB_PATH"));
User user = new User("DeckSetUser", true);
Format format = new Format(DataTypes.Format.ADVANCED, "April 2004");
deckSet = new DeckSet("DeckSetTest", user, format, true);
user.delete();
format.delete();
}
@Override
public void close() throws IOException
{
deckSet.remove();
deckSet.delete();
}
}
private DeckSet_Fixture fixture;
private static DeckSet deckSet;
@Before
public void setup()
{
fixture = new DeckSet_Fixture();
}
@After
public void tearDown() throws IOException
{
fixture.close();
fixture = null;
}
@Test
public void AddCard()
{
DeckError mainErr = deckSet.addCard(DeckType.MAIN,
"Blue-Eyes White Dragon");
assertEquals(mainErr, DeckError.OK);
DeckError sideErr = deckSet.addCard(DeckType.SIDE, "Mirror Force");
assertEquals(sideErr, DeckError.OK);
DeckError extraErr = deckSet.addCard(DeckType.EXTRA, "Stardust Dragon");
assertEquals(extraErr, DeckError.OK);
Map<DeckType, List<StaticCardData>> cards = deckSet.cards();
List<StaticCardData> main = cards.get(DeckType.MAIN);
List<StaticCardData> side = cards.get(DeckType.SIDE);
List<StaticCardData> extra = cards.get(DeckType.EXTRA);
assertEquals(main.size(), 1);
assertEquals(main.get(0).name, "Blue-Eyes White Dragon");
assertEquals(side.size(), 1);
assertEquals(side.get(0).name, "Mirror Force");
assertEquals(extra.size(), 1);
assertEquals(extra.get(0).name, "Stardust Dragon");
}
@Test
public void DeleteCard()
{
deckSet.addCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.addCard(DeckType.SIDE, "Pot of Duality");
deckSet.addCard(DeckType.EXTRA, "Abyss Dweller");
Map<DeckType, List<StaticCardData>> cards = deckSet.cards();
List<StaticCardData> main = cards.get(DeckType.MAIN);
List<StaticCardData> side = cards.get(DeckType.SIDE);
List<StaticCardData> extra = cards.get(DeckType.EXTRA);
assertEquals(main.size(), 1);
assertEquals(side.size(), 1);
assertEquals(extra.size(), 1);
deckSet.deleteCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.deleteCard(DeckType.SIDE, "Pot of Duality");
deckSet.deleteCard(DeckType.EXTRA, "Abyss Dweller");
cards = deckSet.cards();
main = cards.get(DeckType.MAIN);
side = cards.get(DeckType.SIDE);
extra = cards.get(DeckType.EXTRA);
assertEquals(main.size(), 0);
assertEquals(side.size(), 0);
assertEquals(extra.size(), 0);
}
@Test
public void DeleteNotPresentCard()
{
deckSet.addCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.addCard(DeckType.SIDE, "Pot of Duality");
deckSet.addCard(DeckType.EXTRA, "Abyss Dweller");
Map<DeckType, List<StaticCardData>> cards = deckSet.cards();
List<StaticCardData> main = cards.get(DeckType.MAIN);
List<StaticCardData> side = cards.get(DeckType.SIDE);
List<StaticCardData> extra = cards.get(DeckType.EXTRA);
assertEquals(main.size(), 1);
assertEquals(side.size(), 1);
assertEquals(extra.size(), 1);
deckSet.deleteCard(DeckType.MAIN, "Blue-Eyes White Dragon");
deckSet.deleteCard(DeckType.SIDE, "Pot of Greed");
deckSet.deleteCard(DeckType.EXTRA, "Gagaga Cowboy");
cards = deckSet.cards();
main = cards.get(DeckType.MAIN);
side = cards.get(DeckType.SIDE);
extra = cards.get(DeckType.EXTRA);
assertEquals(main.size(), 1);
assertEquals(side.size(), 1);
assertEquals(extra.size(), 1);
}
@Test
public void Validate()
{
deckSet.addCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.addCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.addCard(DeckType.MAIN, "Evilswarm Castor");
deckSet.addCard(DeckType.MAIN, "Evilswarm Heliotrope");
deckSet.addCard(DeckType.MAIN, "Evilswarm Heliotrope");
deckSet.addCard(DeckType.MAIN, "Evilswarm Heliotrope");
deckSet.addCard(DeckType.MAIN, "Evilswarm Mandragora");
deckSet.addCard(DeckType.MAIN, "Evilswarm Mandragora");
deckSet.addCard(DeckType.MAIN, "Evilswarm Mandragora");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Commandant");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Commandant");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Descendant");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Spy");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Spy");
deckSet.addCard(DeckType.MAIN, "Gravekeeper's Spy");
deckSet.addCard(DeckType.MAIN, "Rescue Rabbit");
deckSet.addCard(DeckType.MAIN, "Thunder King Rai-Oh");
deckSet.addCard(DeckType.MAIN, "Allure of Darkness");
assertEquals(deckSet.validate(), false);
deckSet.addCard(DeckType.MAIN, "Dark Hole");
deckSet.addCard(DeckType.MAIN, "Infestation Pandemic");
deckSet.addCard(DeckType.MAIN, "Infestation Pandemic");
deckSet.addCard(DeckType.MAIN, "Necrovalley");
deckSet.addCard(DeckType.MAIN, "Necrovalley");
deckSet.addCard(DeckType.MAIN, "Necrovalley");
deckSet.addCard(DeckType.MAIN, "Pot of Duality");
deckSet.addCard(DeckType.MAIN, "Pot of Duality");
deckSet.addCard(DeckType.MAIN, "Reinforcement of the Army");
deckSet.addCard(DeckType.MAIN, "Reinforcement of the Army");
deckSet.addCard(DeckType.MAIN, "Bottomless Trap Hole");
deckSet.addCard(DeckType.MAIN, "Compulsory Evacuation Device");
deckSet.addCard(DeckType.MAIN, "Dimensional Prison");
deckSet.addCard(DeckType.MAIN, "Dimensional Prison");
deckSet.addCard(DeckType.MAIN, "Dimensional Prison");
deckSet.addCard(DeckType.MAIN, "Fiendish Chain");
deckSet.addCard(DeckType.MAIN, "Fiendish Chain");
deckSet.addCard(DeckType.MAIN, "Infestation Infection");
deckSet.addCard(DeckType.MAIN, "Solemn Warning");
deckSet.addCard(DeckType.MAIN, "Torrential Tribute");
deckSet.addCard(DeckType.MAIN, "Wiretap");
deckSet.addCard(DeckType.MAIN, "Wiretap");
assertEquals(deckSet.validate(), true);
deckSet.addCard(DeckType.EXTRA, "Abyss Dweller");
deckSet.addCard(DeckType.EXTRA, "Cairngorgon, Antiluminescent Knight");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Bahamut");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Exciton Knight");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Ophion");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Ophion");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Ouroboros");
deckSet.addCard(DeckType.EXTRA, "Evilswarm Thanatos");
deckSet.addCard(DeckType.EXTRA, "Gagaga Cowboy");
deckSet.addCard(DeckType.EXTRA, "Maestroke the Symphony Djinn");
deckSet.addCard(DeckType.EXTRA, "Number 101: Silent Honor ARK");
deckSet.addCard(DeckType.EXTRA, "Number 101: Silent Honor ARK");
deckSet.addCard(DeckType.EXTRA, "Number 103: Ragnazero");
deckSet.addCard(DeckType.EXTRA, "Number 66: Master Key Beetle");
deckSet.addCard(DeckType.EXTRA, "Number 82: Heartlandraco");
assertEquals(deckSet.validate(), true);
deckSet.addCard(DeckType.SIDE, "Trap Hole");
deckSet.addCard(DeckType.SIDE, "White Hole");
deckSet.addCard(DeckType.SIDE, "Debunk");
deckSet.addCard(DeckType.SIDE, "Debunk");
deckSet.addCard(DeckType.SIDE, "Mirror Force");
deckSet.addCard(DeckType.SIDE, "Mirror Force");
deckSet.addCard(DeckType.SIDE, "Mirror Force");
deckSet.addCard(DeckType.SIDE, "Evilswarm Mandragora");
deckSet.addCard(DeckType.SIDE, "Mind Control");
deckSet.addCard(DeckType.SIDE, "Soul Release");
deckSet.addCard(DeckType.SIDE, "Spiritualism");
deckSet.addCard(DeckType.SIDE, "Spiritualism");
deckSet.addCard(DeckType.SIDE, "Vanity's Emptiness");
deckSet.addCard(DeckType.SIDE, "Vanity's Emptiness");
deckSet.addCard(DeckType.SIDE, "Vanity's Emptiness");
assertEquals(deckSet.validate(), true);
deckSet.addCard(DeckType.MAIN, "Archfiend Heiress");
deckSet.addCard(DeckType.MAIN, "Armageddon Knight");
deckSet.addCard(DeckType.MAIN, "Dark Grepher");
deckSet.addCard(DeckType.MAIN, "Dark Grepher");
deckSet.addCard(DeckType.MAIN, "Infernity Archfiend");
deckSet.addCard(DeckType.MAIN, "Infernity Archfiend");
deckSet.addCard(DeckType.MAIN, "Infernity Archfiend");
deckSet.addCard(DeckType.MAIN, "Infernity Necromancer");
deckSet.addCard(DeckType.MAIN, "Infernity Necromancer");
deckSet.addCard(DeckType.MAIN, "Stygian Street Patrol");
deckSet.addCard(DeckType.MAIN, "Stygian Street Patrol");
deckSet.addCard(DeckType.MAIN, "Stygian Street Patrol");
deckSet.addCard(DeckType.MAIN, "Summoner Monk");
deckSet.addCard(DeckType.MAIN, "Summoner Monk");
deckSet.addCard(DeckType.MAIN, "Infernity Barrier");
deckSet.addCard(DeckType.MAIN, "Infernity Break");
deckSet.addCard(DeckType.MAIN, "Infernity Break");
deckSet.addCard(DeckType.MAIN, "Infernity Break");
deckSet.addCard(DeckType.MAIN, "Trap Stun");
deckSet.addCard(DeckType.MAIN, "Trap Stun");
assertEquals(deckSet.validate(), true);
}
}
| [
"\"CARD_DB_PATH\""
]
| []
| [
"CARD_DB_PATH"
]
| [] | ["CARD_DB_PATH"] | java | 1 | 0 | |
registry/registry_test.go | package registry_test
import (
"os"
"sync"
"time"
"github.com/moleculer-go/cupaloy/v2"
bus "github.com/moleculer-go/goemitter"
"github.com/moleculer-go/moleculer"
"github.com/moleculer-go/moleculer/broker"
"github.com/moleculer-go/moleculer/transit/memory"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
)
var logLevel = "fatal"
var snap = cupaloy.New(cupaloy.FailOnUpdate(os.Getenv("UPDATE_SNAPSHOTS") == "true"))
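// createPrinterBroker builds a broker named node_printerBroker that exposes a
// "printer" service over the shared in-memory transport.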
func createPrinterBroker(mem *memory.SharedMemory) broker.ServiceBroker {
broker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node_printerBroker" },
LogLevel: logLevel,
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
broker.Publish(moleculer.ServiceSchema{
Name: "printer",
Actions: []moleculer.Action{
{
Name: "print",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
context.Logger().Info("print action invoked. params: ", params)
return params.Value()
},
},
},
Events: []moleculer.Event{
{
Name: "printed",
Handler: func(context moleculer.Context, params moleculer.Payload) {
context.Logger().Info("printer.printed --> ", params.Value())
},
},
},
})
return (*broker)
}
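// createScannerBroker builds a broker named node_scannerBroker that exposes a
// "scanner" service over the shared in-memory transport.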
func createScannerBroker(mem *memory.SharedMemory) broker.ServiceBroker {
broker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node_scannerBroker" },
LogLevel: logLevel,
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
broker.Publish(moleculer.ServiceSchema{
Name: "scanner",
Actions: []moleculer.Action{
{
Name: "scan",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
context.Logger().Info("scan action invoked!")
return params.Value()
},
},
},
Events: []moleculer.Event{
{
Name: "scanned",
Handler: func(context moleculer.Context, params moleculer.Payload) {
context.Logger().Info("scanner.scanned --> ", params.Value())
},
},
},
})
return (*broker)
}
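// createCpuBroker builds node_cpuBroker, whose cpu.compute action chains remote calls to scanner.scan and printer.print; it also publishes its own printer service.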
func createCpuBroker(mem *memory.SharedMemory) broker.ServiceBroker {
broker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node_cpuBroker" },
LogLevel: logLevel,
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
broker.Publish(moleculer.ServiceSchema{
Name: "cpu",
Actions: []moleculer.Action{
{
Name: "compute",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
context.Logger().Debug("compute action invoked!")
scanResult := <-context.Call("scanner.scan", params)
context.Logger().Debug("scanResult: ", scanResult)
printResult := <-context.Call("printer.print", scanResult)
return printResult
},
},
},
})
broker.Publish(moleculer.ServiceSchema{
Name: "printer",
Actions: []moleculer.Action{
{
Name: "print",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
return params.Value()
},
},
},
Events: []moleculer.Event{
{
Name: "printed",
Handler: func(context moleculer.Context, params moleculer.Payload) {
context.Logger().Info("printer.printed --> ", params.Value())
},
},
},
})
return (*broker)
}
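// hasNode reports whether any payload in the list carries the given nodeID.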
func hasNode(list []moleculer.Payload, nodeID string) bool {
for _, p := range list {
if p.Get("nodeID").String() == nodeID {
return true
}
}
return false
}
var _ = Describe("Registry", func() {
Describe("Auto discovery", func() {
It("3 brokers should auto discovery and perform local and remote Calls", func(done Done) {
mem := &memory.SharedMemory{}
printerBroker := createPrinterBroker(mem)
var serviceAdded, serviceRemoved []moleculer.Payload
events := bus.Construct()
addedMutex := &sync.Mutex{}
printerBroker.Publish(moleculer.ServiceSchema{
Name: "internal-consumer",
Events: []moleculer.Event{
moleculer.Event{
Name: "$registry.service.added",
Handler: func(ctx moleculer.Context, params moleculer.Payload) {
addedMutex.Lock()
defer addedMutex.Unlock()
serviceAdded = append(serviceAdded, params)
go events.EmitSync("$registry.service.added", serviceAdded)
},
},
moleculer.Event{
Name: "$registry.service.removed",
Handler: func(ctx moleculer.Context, params moleculer.Payload) {
serviceRemoved = append(serviceRemoved, params)
go events.EmitSync("$registry.service.removed", serviceRemoved)
},
},
},
})
onEvent := func(event string, callback func(list []moleculer.Payload, cancel func())) {
events.On(event, func(v ...interface{}) {
list := v[0].([]moleculer.Payload)
callback(list, func() {
events = bus.Construct()
})
})
}
Expect(printerBroker.LocalNode().GetID()).Should(Equal("node_printerBroker"))
scannerBroker := createScannerBroker(mem)
Expect(scannerBroker.LocalNode().GetID()).Should(Equal("node_scannerBroker"))
cpuBroker := createCpuBroker(mem)
Expect(cpuBroker.LocalNode().GetID()).Should(Equal("node_cpuBroker"))
printerBroker.Start()
printText := "TEXT TO PRINT"
printResult := <-printerBroker.Call("printer.print", printText)
Expect(printResult.Error()).Should(BeNil())
Expect(printResult.Value()).Should(Equal(printText))
scanText := "TEXT TO SCAN"
scanResult := <-printerBroker.Call("scanner.scan", printText)
Expect(scanResult.IsError()).Should(BeTrue())
scannerBroker.Start()
step := make(chan bool)
onEvent("$registry.service.added", func(list []moleculer.Payload, cancel func()) {
if hasNode(serviceAdded, "node_scannerBroker") {
cancel()
step <- true
}
})
<-step
scanResult = <-scannerBroker.Call("scanner.scan", scanText)
Expect(scanResult.IsError()).ShouldNot(Equal(true))
Expect(scanResult.Value()).Should(Equal(scanText))
scanResult = <-printerBroker.Call("scanner.scan", scanText)
Expect(scanResult.IsError()).ShouldNot(Equal(true))
Expect(scanResult.Value()).Should(Equal(scanText))
cpuBroker.Start()
serviceAdded = []moleculer.Payload{}
step = make(chan bool)
onEvent("$registry.service.added", func(list []moleculer.Payload, cancel func()) {
if hasNode(serviceAdded, "node_cpuBroker") {
cancel()
step <- true
}
})
<-step
cpuBroker.WaitForActions("scanner.scan", "printer.print")
time.Sleep(time.Millisecond)
contentToCompute := "Some long long text ..."
computeResult := <-printerBroker.Call("cpu.compute", contentToCompute)
Expect(computeResult.Error()).Should(Succeed())
Expect(computeResult.Value()).Should(Equal(contentToCompute))
//stopping broker B
scannerBroker.Stop()
step = make(chan bool)
onEvent("$registry.service.removed", func(list []moleculer.Payload, cancel func()) {
if hasNode(serviceRemoved, "node_scannerBroker") {
cancel()
step <- true
}
})
<-step
Expect(func() {
<-scannerBroker.Call("scanner.scan", scanText)
}).Should(Panic()) //broker B is stopped ... so it should panic
close(done)
}, 3)
})
Describe("Namespace", func() {
It("Services across namespaces cannos see each other", func(done Done) {
mem := &memory.SharedMemory{}
devBroker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node1_devBroker" },
LogLevel: logLevel,
Namespace: "dev",
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
stageBroker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node1_stageBroker" },
LogLevel: logLevel,
Namespace: "stage",
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
stage2Broker := broker.New(&moleculer.Config{
DiscoverNodeID: func() string { return "node1_stage2Broker" },
LogLevel: logLevel,
Namespace: "stage",
TransporterFactory: func() interface{} {
transport := memory.Create(log.WithField("transport", "memory"), mem)
return &transport
},
})
//alarm service - prints the alarm and returns the namespace :)
alarmService := func(namespace string) moleculer.ServiceSchema {
return moleculer.ServiceSchema{
Name: "alarm",
Actions: []moleculer.Action{
{
Name: "bell",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
context.Logger().Info("alarm.bell ringing !!! namemspace: ", namemspace)
return namemspace
},
},
},
}
}
//available in the dev namespace only
devOnlyService := moleculer.ServiceSchema{
Name: "devOnly",
Actions: []moleculer.Action{
{
Name: "code",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
return "🧠"
},
},
},
}
devBroker.Publish(alarmService("dev"))
devBroker.Publish(devOnlyService)
devBroker.Start()
stageBroker.Start()
stage2Broker.Publish(moleculer.ServiceSchema{
Name: "stage2",
Actions: []moleculer.Action{
{
Name: "where",
Handler: func(context moleculer.Context, params moleculer.Payload) interface{} {
return "🌏"
},
},
},
})
stage2Broker.Start()
devAlarm := <-devBroker.Call("alarm.bell", nil)
Expect(devAlarm.IsError()).Should(BeFalse())
Expect(devAlarm.String()).Should(Equal("dev"))
code := <-devBroker.Call("devOnly.code", nil)
Expect(code.IsError()).Should(BeFalse())
Expect(code.String()).Should(Equal("🧠"))
time.Sleep(time.Millisecond)
//alarm.bell should not be accessible to the stage broker
stageAlarm := <-stageBroker.Call("alarm.bell", nil)
Expect(stageAlarm.IsError()).Should(BeTrue())
Expect(stageAlarm.Error().Error()).Should(Equal("Registry - endpoint not found for actionName: alarm.bell namespace: stage"))
stageBroker.Publish(alarmService("stage"))
stageAlarm = <-stageBroker.Call("alarm.bell", nil)
Expect(stageAlarm.IsError()).Should(BeFalse())
Expect(stageAlarm.String()).Should(Equal("stage"))
code = <-stageBroker.Call("good.code", nil)
Expect(code.IsError()).Should(BeTrue())
Expect(code.Error().Error()).Should(Equal("Registry - endpoint not found for actionName: good.code namespace: stage"))
//make sure 2 brokers on the same namespace can talk to each other
msg := <-stageBroker.Call("stage2.where", nil)
Expect(msg.IsError()).Should(BeFalse())
Expect(msg.String()).Should(Equal("🌏"))
devBroker.Stop()
stageBroker.Stop()
stage2Broker.Stop()
close(done)
}, 2)
})
})
| [
"\"UPDATE_SNAPSHOTS\""
]
| []
| [
"UPDATE_SNAPSHOTS"
]
| [] | ["UPDATE_SNAPSHOTS"] | go | 1 | 0 | |
doc/conf.py | # -*- coding: utf-8 -*-
# pylint: disable=C0103,W0622
'''
Sphinx documentation for Salt
'''
import functools
import sys
import os
import types
from sphinx.directives import TocTree
# pylint: disable=R0903
class Mock(object):
'''
Mock out specified imports
This allows autodoc to do its thing without having oodles of req'd
installed libs. This doesn't work with ``import *`` imports.
http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
'''
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
ret = Mock()
# If mocked function is used as a decorator, expose decorated function.
# if args and callable(args[-1]):
# functools.update_wrapper(ret, args[0])
return ret
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return Mock()
# pylint: enable=R0903
MOCK_MODULES = [
# salt core
'Crypto',
'Crypto.Cipher',
'Crypto.Hash',
'Crypto.PublicKey',
'Crypto.Random',
'M2Crypto',
'msgpack',
'yaml',
'yaml.constructor',
'yaml.nodes',
'yaml.scanner',
'zmq',
# third-party libs for cloud modules
'libcloud',
'libcloud.compute',
'libcloud.compute.base',
'libcloud.compute.deployment',
'libcloud.compute.providers',
'libcloud.compute.types',
'libcloud.loadbalancer',
'libcloud.loadbalancer.types',
'libcloud.loadbalancer.providers',
'libcloud.common',
'libcloud.common.google',
# third-party libs for netapi modules
'cherrypy',
'cherrypy.lib',
'cherrypy.process',
'cherrypy.wsgiserver',
'cherrypy.wsgiserver.ssl_builtin',
'tornado',
'tornado.concurrent',
'tornado.gen',
'tornado.httpserver',
'tornado.ioloop',
'tornado.web',
'tornado.websocket',
'ws4py',
'ws4py.server',
'ws4py.server.cherrypyserver',
'ws4py.websocket',
# modules, renderers, states, returners, et al
'django',
'libvirt',
'mako',
'mako.template',
'MySQLdb',
'MySQLdb.cursors',
'psutil',
'pycassa',
'pymongo',
'rabbitmq_server',
'redis',
'requests',
'rpm',
'rpmUtils',
'rpmUtils.arch',
'yum',
'OpenSSL',
'zfs'
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -- Add paths to PYTHONPATH ---------------------------------------------------
try:
docs_basepath = os.path.abspath(os.path.dirname(__file__))
except NameError:
# sphinx-intl and six execute some code which will raise this NameError
# assume we're in the doc/ directory
docs_basepath = os.path.abspath(os.path.dirname('.'))
addtl_paths = (
os.pardir, # salt itself (for autodoc)
'_ext', # custom Sphinx extensions
)
for path in addtl_paths:
sys.path.insert(0, os.path.abspath(os.path.join(docs_basepath, path)))
# We're now able to import salt
import salt.version
formulas_dir = os.path.join(os.pardir, docs_basepath, 'formulas')
# ----- Intersphinx Settings ------------------------------------------------>
intersphinx_mapping = {
'python2': ('http://docs.python.org/2', None),
'python3': ('http://docs.python.org/3', None)
}
# <---- Intersphinx Settings -------------------------------------------------
# -- General Configuration -----------------------------------------------------
project = 'Salt'
copyright = '2014 SaltStack, Inc.'
version = salt.version.__version__
#release = '.'.join(map(str, salt.version.__version_info__))
release = '2014.1.10'
spelling_lang = 'en_US'
language = 'en'
locale_dirs = [
'_locale',
]
master_doc = 'contents'
templates_path = ['_templates']
exclude_patterns = ['_build', '_incl/*', 'ref/cli/_includes/*.rst']
extensions = [
'saltdomain', # Must come early
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'httpdomain',
'youtube',
'saltautodoc', # Must be AFTER autodoc
'shorturls',
]
try:
import sphinxcontrib.spelling
except ImportError:
pass
else:
extensions += ['sphinxcontrib.spelling']
modindex_common_prefix = ['salt.']
autosummary_generate = True
# Define a substitution for linking to the latest release tarball
rst_prolog = """\
.. |saltrepo| replace:: https://github.com/saltstack/salt
.. _`salt-users`: https://groups.google.com/forum/#!forum/salt-users
.. _`salt-announce`: https://groups.google.com/forum/#!forum/salt-announce
.. _`salt-packagers`: https://groups.google.com/forum/#!forum/salt-packagers
"""
# A shortcut for linking to tickets on the GitHub issue tracker
extlinks = {
'blob': ('https://github.com/saltstack/salt/blob/%s/%%s' % 'develop', None),
'download': ('https://cloud.github.com/downloads/saltstack/salt/%s', None),
'issue': ('https://github.com/saltstack/salt/issues/%s', 'issue '),
'formula': ('https://github.com/saltstack-formulas/%s', ''),
}
# ----- Localization -------------------------------------------------------->
locale_dirs = ['locale/']
gettext_compact = False
# <---- Localization ---------------------------------------------------------
### HTML options
html_theme = 'saltstack'
html_theme_path = ['_themes']
html_title = None
html_short_title = 'Salt'
html_static_path = ['_static']
html_logo = None # specified in the theme layout.html
html_favicon = 'favicon.ico'
html_use_smartypants = False
# Set a var if we're building docs for the live site or not
on_saltstack = 'SALT_ON_SALTSTACK' in os.environ
# Use Google customized search or use Sphinx built-in JavaScript search
if on_saltstack:
html_search_template = 'googlesearch.html'
else:
html_search_template = 'searchbox.html'
html_additional_pages = {
'404': '404.html',
}
html_default_sidebars = [
html_search_template,
'version.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
]
html_sidebars = {
'ref/**/all/salt.*': [
html_search_template,
'version.html',
'modules-sidebar.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'saltstack.html',
],
'ref/formula/all/*': [
],
}
html_context = {
'on_saltstack': on_saltstack,
'html_default_sidebars': html_default_sidebars,
'github_base': 'https://github.com/saltstack/salt',
'github_issues': 'https://github.com/saltstack/salt/issues',
'github_downloads': 'https://github.com/saltstack/salt/downloads',
}
html_use_index = True
html_last_updated_fmt = '%b %d, %Y'
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
### Latex options
latex_documents = [
('contents', 'Salt.tex', 'Salt Documentation', 'SaltStack, Inc.', 'manual'),
]
latex_logo = '_static/salt-logo.pdf'
latex_elements = {
'inputenc': '', # use XeTeX instead of the inputenc LaTeX package.
'utf8extra': '',
'preamble': '''
\usepackage{fontspec}
\setsansfont{DejaVu Sans}
\setromanfont{DejaVu Serif}
\setmonofont{DejaVu Sans Mono}
''',
}
### Linkcheck options
linkcheck_ignore = [r'http://127.0.0.1',
r'http://salt:\d+',
r'http://local:\d+',
r'https://console.aws.amazon.com',
r'http://192.168.33.10',
r'http://domain:\d+',
r'http://123.456.789.012:\d+',
r'http://localhost',
r'https://groups.google.com/forum/#!forum/salt-users',
r'http://logstash.net/docs/latest/inputs/udp',
r'http://logstash.net/docs/latest/inputs/zeromq',
r'http://www.youtube.com/saltstack',
r'http://raven.readthedocs.org',
r'https://getsentry.com',
r'http://salt-cloud.readthedocs.org',
r'http://salt.readthedocs.org',
r'http://www.pip-installer.org/',
r'http://www.windowsazure.com/',
r'https://github.com/watching',
r'dash-feed://',
r'https://github.com/saltstack/salt/',
r'http://bootstrap.saltstack.org',
r'https://bootstrap.saltstack.com',
r'https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh',
r'media.readthedocs.org/dash/salt/latest/salt.xml',
r'https://portal.aws.amazon.com/gp/aws/securityCredentials',
r'https://help.github.com/articles/fork-a-repo',
r'dash-feed://https%3A//media.readthedocs.org/dash/salt/latest/salt.xml'
]
linkcheck_anchors = False
### Manpage options
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
authors = [
'Thomas S. Hatch <[email protected]> and many others, please see the Authors file',
]
man_pages = [
('contents', 'salt', 'Salt Documentation', authors, 7),
('ref/cli/salt', 'salt', 'salt', authors, 1),
('ref/cli/salt-master', 'salt-master', 'salt-master Documentation', authors, 1),
('ref/cli/salt-minion', 'salt-minion', 'salt-minion Documentation', authors, 1),
('ref/cli/salt-key', 'salt-key', 'salt-key Documentation', authors, 1),
('ref/cli/salt-cp', 'salt-cp', 'salt-cp Documentation', authors, 1),
('ref/cli/salt-call', 'salt-call', 'salt-call Documentation', authors, 1),
('ref/cli/salt-syndic', 'salt-syndic', 'salt-syndic Documentation', authors, 1),
('ref/cli/salt-run', 'salt-run', 'salt-run Documentation', authors, 1),
('ref/cli/salt-ssh', 'salt-ssh', 'salt-ssh Documentation', authors, 1),
('ref/cli/salt-cloud', 'salt-cloud', 'Salt Cloud Command', authors, 1),
('ref/cli/salt-api', 'salt-api', 'salt-api Command', authors, 1),
]
### epub options
epub_title = 'Salt Documentation'
epub_author = 'SaltStack, Inc.'
epub_publisher = epub_author
epub_copyright = copyright
epub_scheme = 'URL'
epub_identifier = 'http://saltstack.org/'
#epub_tocdepth = 3
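# autodoc-skip-member handler: hide private members and module-level mod_init() functions.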
def skip_mod_init_member(app, what, name, obj, skip, options):
if name.startswith('_'):
return True
if isinstance(obj, types.FunctionType) and obj.__name__ == 'mod_init':
return True
return False
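# Zero-pad version components so release pages sort numerically, e.g. 'releases/0.17.5' -> '0000.0017.0005'.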
def _normalize_version(args):
_, path = args
return '.'.join([x.zfill(4) for x in (path.split('/')[-1].split('.'))])
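# Toctree directive variant that orders release entries newest-first.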
class ReleasesTree(TocTree):
option_spec = dict(TocTree.option_spec)
def run(self):
rst = super(ReleasesTree, self).run()
entries = rst[0][0]['entries'][:]
entries.sort(key=_normalize_version, reverse=True)
rst[0][0]['entries'][:] = entries
return rst
def setup(app):
app.add_directive('releasestree', ReleasesTree)
app.connect('autodoc-skip-member', skip_mod_init_member)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/system-integration-test/sit-jee-app/src/main/java/io/joynr/systemintegrationtest/jee/ControllerBean.java | /*
* #%L
* %%
* Copyright (C) 2018 BMW Car IT GmbH
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package io.joynr.systemintegrationtest.jee;
import static io.joynr.systemintegrationtest.jee.JoynrConfigurationProvider.SIT_DOMAIN_PREFIX;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.ejb.ConcurrencyManagement;
import javax.ejb.ConcurrencyManagementType;
import javax.ejb.Stateless;
import javax.inject.Inject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.joynr.arbitration.DiscoveryQos;
import io.joynr.exceptions.JoynrRuntimeException;
import io.joynr.jeeintegration.api.ServiceLocator;
import io.joynr.jeeintegration.api.ServiceProvider;
import joynr.test.SitControllerSync;
import joynr.test.SystemIntegrationTestSync;
@Stateless
@ServiceProvider(serviceInterface = SitControllerSync.class)
@ConcurrencyManagement(ConcurrencyManagementType.BEAN)
public class ControllerBean implements SitControllerSync {
private static final Logger logger = LoggerFactory.getLogger(ControllerBean.class);
private ServiceLocator serviceLocator;
private String configuredDomain;
@Inject
public ControllerBean(ServiceLocator serviceLocator) {
this.serviceLocator = serviceLocator;
configuredDomain = System.getenv("SIT_DOMAIN");
}
@Override
public String ping() {
logger.info("ping called");
return "OK";
}
@Override
public String waitForStatelessResult(Integer timeoutMs) {
String errorMsg = "SIT RESULT error: waitForStatelessResult NOT IMPLEMENTED";
logger.error(errorMsg);
return errorMsg;
}
@Override
public String triggerTests(String domains, Boolean expectFailure) {
logger.info("triggerTests called \n");
StringBuffer result = new StringBuffer();
if (expectFailure) {
callProducerWithExpectedFailure("failure_" + SIT_DOMAIN_PREFIX, result);
} else {
logger.info("testDomains: " + domains + " \n");
String[] testDomains = domains.split("\\|");
for (String domain : testDomains) {
logger.info("received domain " + domain + "\n");
}
for (String appendValue : new String[]{ ".jee", ".java", ".cpp", ".node" }) {
for (String testDomain : testDomains) {
callProducer(testDomain + "_" + SIT_DOMAIN_PREFIX + appendValue, result);
}
}
}
return Base64.getMimeEncoder().encodeToString(result.toString().getBytes(StandardCharsets.UTF_8));
}
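// Resolve a SystemIntegrationTest proxy in the given domain using the GBIDs from SIT_GBIDS, call add(1, 1) and append a SIT RESULT success/error line to the result buffer.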
private void callProducer(String domain, StringBuffer result) {
try {
logger.info("callProducer with domain " + domain + " called \n");
String[] configuredGbids = System.getenv("SIT_GBIDS").trim().split(",");
DiscoveryQos discoveryQos = new DiscoveryQos();
discoveryQos.setDiscoveryTimeoutMs(90000); // 90 Seconds
SystemIntegrationTestSync proxy = serviceLocator.builder(SystemIntegrationTestSync.class, domain)
.withDiscoveryQos(discoveryQos)
.withGbids(configuredGbids)
.build();
Integer additionResult = proxy.add(1, 1);
if (additionResult != 2) {
throw new IllegalArgumentException("1 + 1 should be 2, got: " + additionResult);
}
result.append("SIT RESULT success: JEE consumer ").append(configuredDomain).append(" -> ").append(domain);
} catch (Exception e) {
logger.error("Exception in callProducer: " + e);
result.append("SIT RESULT error: JEE consumer ")
.append(configuredDomain)
.append(" -> ")
.append(domain)
.append("\nException: ")
.append(e.toString());
Util.addStacktraceToResultString(e, result);
}
result.append("\n");
}
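// Same call as above, but against an invalid GBID: the test only counts as success when the proxy call fails with a JoynrRuntimeException.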
private void callProducerWithExpectedFailure(String domain, StringBuffer result) {
try {
logger.info("callProducerWithExpectedFailure called \n");
DiscoveryQos discoveryQos = new DiscoveryQos();
discoveryQos.setDiscoveryTimeoutMs(10000); // 10 Seconds
SystemIntegrationTestSync proxy = serviceLocator.builder(SystemIntegrationTestSync.class, domain)
.withDiscoveryQos(discoveryQos)
.withGbids(new String[]{ "invalid" })
.build();
Integer additionResult = proxy.add(1, 1);
if (additionResult != 2) {
throw new IllegalArgumentException("1 + 1 should be 2, got: " + additionResult);
}
result.append("SIT RESULT error: JEE consumer ").append(configuredDomain).append(" -> ").append(domain);
} catch (JoynrRuntimeException e) {
result.append("SIT RESULT success: JEE consumer ").append(configuredDomain).append(" -> ").append(domain);
} catch (Exception e) {
result.append("SIT RESULT error: JEE consumer ")
.append(configuredDomain)
.append(" -> ")
.append(domain)
.append("\nException: ")
.append(e.toString());
Util.addStacktraceToResultString(e, result);
}
result.append("\n");
}
}
| [
"\"SIT_DOMAIN\"",
"\"SIT_GBIDS\""
]
| []
| [
"SIT_GBIDS",
"SIT_DOMAIN"
]
| [] | ["SIT_GBIDS", "SIT_DOMAIN"] | java | 2 | 0 | |
samples/aci-ap/ap.go | package main
import (
"log"
"os"
"github.com/udhos/acigo/aci"
)
func main() {
debug := os.Getenv("DEBUG") != ""
if len(os.Args) < 3 {
log.Fatalf("usage: %s add|del|list tenant ap [description]", os.Args[0])
}
cmd := os.Args[1]
tenant := os.Args[2]
isList := cmd == "list"
var name, descr string
if !isList {
if len(os.Args) < 4 {
log.Fatalf("usage: %s add|del|list tenant ap [description]", os.Args[0])
}
name = os.Args[3]
if len(os.Args) > 4 {
descr = os.Args[4]
}
}
a := login(debug)
defer logout(a)
// add/del ap
execute(a, cmd, tenant, name, descr)
// display existing
aps, errList := a.ApplicationProfileList(tenant)
if errList != nil {
log.Printf("could not list application profiles: %v", errList)
return
}
for _, t := range aps {
name := t["name"]
dn := t["dn"]
descr := t["descr"]
log.Printf("FOUND application profile: name=%s dn=%s descr=%s\n", name, dn, descr)
}
}
func execute(a *aci.Client, cmd, tenant, name, descr string) {
switch cmd {
case "add":
errAdd := a.ApplicationProfileAdd(tenant, name, descr)
if errAdd != nil {
log.Printf("FAILURE: add error: %v", errAdd)
return
}
log.Printf("SUCCESS: add: %s", name)
case "del":
errDel := a.ApplicationProfileDel(tenant, name)
if errDel != nil {
log.Printf("FAILURE: del error: %v", errDel)
return
}
log.Printf("SUCCESS: del: %s", name)
case "list":
default:
log.Printf("unknown command: %s", cmd)
}
}
func login(debug bool) *aci.Client {
a, errNew := aci.New(aci.ClientOptions{Debug: debug})
if errNew != nil {
log.Printf("login new client error: %v", errNew)
os.Exit(1)
}
errLogin := a.Login()
if errLogin != nil {
log.Printf("login error: %v", errLogin)
os.Exit(1)
}
return a
}
func logout(a *aci.Client) {
errLogout := a.Logout()
if errLogout != nil {
log.Printf("logout error: %v", errLogout)
return
}
log.Printf("logout: done")
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
conans/test/util/tools_test.py | # -*- coding: utf-8 -*-
import mock
import os
import platform
import unittest
from collections import namedtuple
from six import StringIO
from conans.client.client_cache import CONAN_CONF
from conans import tools
from conans.client.conan_api import ConanAPIV1
from conans.client.conf import default_settings_yml, default_client_conf
from conans.client.output import ConanOutput
from conans.client.tools.win import vcvars_dict
from conans.client.tools.scm import Git
from conans.errors import ConanException, NotFoundException
from conans.model.settings import Settings
from conans.test.utils.runner import TestRunner
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient, TestBufferConanOutput, create_local_git_repo
from conans.tools import which
from conans.tools import OSInfo, SystemPackageTool, replace_in_file, AptTool, ChocolateyTool,\
set_global_instances
from conans.util.files import save, load, md5
import requests
class RunnerMock(object):
def __init__(self, return_ok=True):
self.command_called = None
self.return_ok = return_ok
def __call__(self, command, output, win_bash=False, subsystem=None): # @UnusedVariable
self.command_called = command
self.win_bash = win_bash
self.subsystem = subsystem
return 0 if self.return_ok else 1
class ReplaceInFileTest(unittest.TestCase):
def setUp(self):
text = u'J\xe2nis\xa7'
self.tmp_folder = temp_folder()
self.win_file = os.path.join(self.tmp_folder, "win_encoding.txt")
text = text.encode("Windows-1252", "ignore")
with open(self.win_file, "wb") as handler:
handler.write(text)
self.bytes_file = os.path.join(self.tmp_folder, "bytes_encoding.txt")
with open(self.bytes_file, "wb") as handler:
handler.write(text)
def test_replace_in_file(self):
replace_in_file(self.win_file, "nis", "nus")
replace_in_file(self.bytes_file, "nis", "nus")
content = tools.load(self.win_file)
self.assertNotIn("nis", content)
self.assertIn("nus", content)
content = tools.load(self.bytes_file)
self.assertNotIn("nis", content)
self.assertIn("nus", content)
class ToolsTest(unittest.TestCase):
def load_save_test(self):
folder = temp_folder()
path = os.path.join(folder, "file")
save(path, u"äüïöñç")
content = load(path)
self.assertEqual(content, u"äüïöñç")
def md5_test(self):
result = md5(u"äüïöñç")
self.assertEqual("dfcc3d74aa447280a7ecfdb98da55174", result)
def cpu_count_test(self):
cpus = tools.cpu_count()
self.assertIsInstance(cpus, int)
self.assertGreaterEqual(cpus, 1)
with tools.environment_append({"CONAN_CPU_COUNT": "34"}):
self.assertEquals(tools.cpu_count(), 34)
def get_env_unit_test(self):
"""
Unit tests tools.get_env
"""
# Test default
self.assertIsNone(
tools.get_env("NOT_DEFINED", environment={}),
None
)
# Test defined default
self.assertEqual(
tools.get_env("NOT_DEFINED_KEY", default="random_default", environment={}),
"random_default"
)
# Test return defined string
self.assertEqual(
tools.get_env("FROM_STR", default="", environment={"FROM_STR": "test_string_value"}),
"test_string_value"
)
# Test boolean conversion
self.assertEqual(
tools.get_env("BOOL_FROM_STR", default=False, environment={"BOOL_FROM_STR": "1"}),
True
)
self.assertEqual(
tools.get_env("BOOL_FROM_STR", default=True, environment={"BOOL_FROM_STR": "0"}),
False
)
self.assertEqual(
tools.get_env("BOOL_FROM_STR", default=False, environment={"BOOL_FROM_STR": "True"}),
True
)
self.assertEqual(
tools.get_env("BOOL_FROM_STR", default=True, environment={"BOOL_FROM_STR": ""}),
False
)
# Test int conversion
self.assertEqual(
tools.get_env("TO_INT", default=2, environment={"TO_INT": "1"}),
1
)
# Test float conversion
self.assertEqual(
tools.get_env("TO_FLOAT", default=2.0, environment={"TO_FLOAT": "1"}),
1.0
),
# Test list conversion
self.assertEqual(
tools.get_env("TO_LIST", default=[], environment={"TO_LIST": "1,2,3"}),
["1", "2", "3"]
)
self.assertEqual(
tools.get_env("TO_LIST_NOT_TRIMMED", default=[], environment={"TO_LIST_NOT_TRIMMED": " 1 , 2 , 3 "}),
["1", "2", "3"]
)
def test_get_env_in_conanfile(self):
"""
Test get_env is available and working in conanfile
"""
client = TestClient()
conanfile = """from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def build(self):
run_tests = tools.get_env("CONAN_RUN_TESTS", default=False)
print("test_get_env_in_conafile CONAN_RUN_TESTS=%r" % run_tests)
assert(run_tests == True)
"""
client.save({"conanfile.py": conanfile})
with tools.environment_append({"CONAN_RUN_TESTS": "1"}):
client.run("install .")
client.run("build .")
def test_global_tools_overrided(self):
client = TestClient()
conanfile = """
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
def build(self):
assert(tools.net._global_requester != None)
assert(tools.files._global_output != None)
"""
client.save({"conanfile.py": conanfile})
client.run("install .")
client.run("build .")
# Now test the real command get_command to check that it sets the module global vars
tmp = temp_folder()
conf = default_client_conf.replace("\n[proxies]", "\n[proxies]\nhttp = http://myproxy.com")
os.mkdir(os.path.join(tmp, ".conan"))
save(os.path.join(tmp, ".conan", CONAN_CONF), conf)
with tools.environment_append({"CONAN_USER_HOME": tmp}):
conan_api, _, _ = ConanAPIV1.factory()
conan_api.remote_list()
self.assertEquals(tools.net._global_requester.proxies, {"http": "http://myproxy.com"})
self.assertIsNotNone(tools.files._global_output.warn)
def test_environment_nested(self):
with tools.environment_append({"A": "1", "Z": "40"}):
with tools.environment_append({"A": "1", "B": "2"}):
with tools.environment_append({"A": "2", "B": "2"}):
self.assertEquals(os.getenv("A"), "2")
self.assertEquals(os.getenv("B"), "2")
self.assertEquals(os.getenv("Z"), "40")
self.assertEquals(os.getenv("A", None), "1")
self.assertEquals(os.getenv("B", None), "2")
self.assertEquals(os.getenv("A", None), "1")
self.assertEquals(os.getenv("Z", None), "40")
self.assertEquals(os.getenv("A", None), None)
self.assertEquals(os.getenv("B", None), None)
self.assertEquals(os.getenv("Z", None), None)
def system_package_tool_fail_when_not_0_returned_test(self):
def get_linux_error_message():
"""
Get error message for Linux platform if distro is supported, None otherwise
"""
os_info = OSInfo()
update_command = None
if os_info.with_apt:
update_command = "sudo apt-get update"
elif os_info.with_yum:
update_command = "sudo yum check-update"
elif os_info.with_zypper:
update_command = "sudo zypper --non-interactive ref"
elif os_info.with_pacman:
update_command = "sudo pacman -Syyu --noconfirm"
return "Command '{0}' failed".format(update_command) if update_command is not None else None
platform_update_error_msg = {
"Linux": get_linux_error_message(),
"Darwin": "Command 'brew update' failed",
"Windows": "Command 'choco outdated' failed" if which("choco.exe") else None,
}
runner = RunnerMock(return_ok=False)
pkg_tool = ChocolateyTool() if which("choco.exe") else None
spt = SystemPackageTool(runner=runner, tool=pkg_tool)
msg = platform_update_error_msg.get(platform.system(), None)
if msg is not None:
with self.assertRaisesRegexp(ConanException, msg):
spt.update()
else:
spt.update() # Won't raise anything because won't do anything
def system_package_tool_test(self):
with tools.environment_append({"CONAN_SYSREQUIRES_SUDO": "True"}):
runner = RunnerMock()
# fake os info to linux debian, default sudo
os_info = OSInfo()
os_info.is_macos = False
os_info.is_linux = True
os_info.is_windows = False
os_info.linux_distro = "debian"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
os_info.linux_distro = "ubuntu"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
os_info.linux_distro = "knoppix"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
os_info.linux_distro = "fedora"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo yum check-update")
os_info.linux_distro = "opensuse"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo zypper --non-interactive ref")
os_info.linux_distro = "redhat"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.install("a_package", force=False)
self.assertEquals(runner.command_called, "rpm -q a_package")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "sudo yum install -y a_package")
os_info.linux_distro = "debian"
spt = SystemPackageTool(runner=runner, os_info=os_info)
with self.assertRaises(ConanException):
runner.return_ok = False
spt.install("a_package")
self.assertEquals(runner.command_called, "sudo apt-get install -y --no-install-recommends a_package")
runner.return_ok = True
spt.install("a_package", force=False)
self.assertEquals(runner.command_called, "dpkg -s a_package")
os_info.is_macos = True
os_info.is_linux = False
os_info.is_windows = False
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "brew update")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "brew install a_package")
os_info.is_freebsd = True
os_info.is_macos = False
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "sudo pkg update")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "sudo pkg install -y a_package")
spt.install("a_package", force=False)
self.assertEquals(runner.command_called, "pkg info a_package")
# Chocolatey is an optional package manager on Windows
if platform.system() == "Windows" and which("choco.exe"):
os_info.is_freebsd = False
os_info.is_windows = True
spt = SystemPackageTool(runner=runner, os_info=os_info, tool=ChocolateyTool())
spt.update()
self.assertEquals(runner.command_called, "choco outdated")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "choco install --yes a_package")
spt.install("a_package", force=False)
self.assertEquals(runner.command_called,
'choco search --local-only --exact a_package | findstr /c:"1 packages installed."')
with tools.environment_append({"CONAN_SYSREQUIRES_SUDO": "False"}):
os_info = OSInfo()
os_info.is_linux = True
os_info.linux_distro = "redhat"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "yum install -y a_package")
spt.update()
self.assertEquals(runner.command_called, "yum check-update")
os_info.linux_distro = "ubuntu"
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "apt-get install -y --no-install-recommends a_package")
spt.update()
self.assertEquals(runner.command_called, "apt-get update")
os_info.is_macos = True
os_info.is_linux = False
os_info.is_windows = False
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "brew update")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "brew install a_package")
os_info.is_freebsd = True
os_info.is_macos = False
os_info.is_windows = False
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "pkg update")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "pkg install -y a_package")
spt.install("a_package", force=False)
self.assertEquals(runner.command_called, "pkg info a_package")
os_info.is_solaris = True
os_info.is_freebsd = False
os_info.is_windows = False
spt = SystemPackageTool(runner=runner, os_info=os_info)
spt.update()
self.assertEquals(runner.command_called, "pkgutil --catalog")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "pkgutil --install --yes a_package")
with tools.environment_append({"CONAN_SYSREQUIRES_SUDO": "True"}):
# Chocolatey is an optional package manager on Windows
if platform.system() == "Windows" and which("choco.exe"):
os_info.is_solaris = False
os_info.is_windows = True
spt = SystemPackageTool(runner=runner, os_info=os_info, tool=ChocolateyTool())
spt.update()
self.assertEquals(runner.command_called, "choco outdated")
spt.install("a_package", force=True)
self.assertEquals(runner.command_called, "choco install --yes a_package")
spt.install("a_package", force=False)
self.assertEquals(runner.command_called,
'choco search --local-only --exact a_package | findstr /c:"1 packages installed."')
def system_package_tool_try_multiple_test(self):
class RunnerMultipleMock(object):
def __init__(self, expected=None):
self.calls = 0
self.expected = expected
def __call__(self, command, output): # @UnusedVariable
self.calls += 1
return 0 if command in self.expected else 1
packages = ["a_package", "another_package", "yet_another_package"]
with tools.environment_append({"CONAN_SYSREQUIRES_SUDO": "True"}):
runner = RunnerMultipleMock(["dpkg -s another_package"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
spt.install(packages)
self.assertEquals(2, runner.calls)
runner = RunnerMultipleMock(["sudo apt-get update",
"sudo apt-get install -y --no-install-recommends yet_another_package"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
spt.install(packages)
self.assertEquals(7, runner.calls)
runner = RunnerMultipleMock(["sudo apt-get update"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
with self.assertRaises(ConanException):
spt.install(packages)
self.assertEquals(7, runner.calls)
def system_package_tool_mode_test(self):
"""
System Package Tool mode is defined by CONAN_SYSREQUIRES_MODE env variable.
Allowed values: (enabled, verify, disabled). Parser accepts it in lower/upper case or any combination.
"""
class RunnerMultipleMock(object):
def __init__(self, expected=None):
self.calls = 0
self.expected = expected
def __call__(self, command, *args, **kwargs): # @UnusedVariable
self.calls += 1
return 0 if command in self.expected else 1
packages = ["a_package", "another_package", "yet_another_package"]
# Check invalid mode raises ConanException
with tools.environment_append({
"CONAN_SYSREQUIRES_MODE": "test_not_valid_mode",
"CONAN_SYSREQUIRES_SUDO": "True"
}):
runner = RunnerMultipleMock([])
spt = SystemPackageTool(runner=runner, tool=AptTool())
with self.assertRaises(ConanException) as exc:
spt.install(packages)
self.assertIn("CONAN_SYSREQUIRES_MODE=test_not_valid_mode is not allowed", str(exc.exception))
self.assertEquals(0, runner.calls)
# Check verify mode, a package report should be displayed in output and ConanException raised.
# No system packages are installed
with tools.environment_append({
"CONAN_SYSREQUIRES_MODE": "VeRiFy",
"CONAN_SYSREQUIRES_SUDO": "True"
}):
packages = ["verify_package", "verify_another_package", "verify_yet_another_package"]
runner = RunnerMultipleMock(["sudo apt-get update"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
with self.assertRaises(ConanException) as exc:
spt.install(packages)
self.assertIn("Aborted due to CONAN_SYSREQUIRES_MODE=", str(exc.exception))
self.assertIn('\n'.join(packages), tools.system_pm._global_output)
self.assertEquals(3, runner.calls)
# Check disabled mode, a package report should be displayed in output.
# No system packages are installed
with tools.environment_append({
"CONAN_SYSREQUIRES_MODE": "DiSaBlEd",
"CONAN_SYSREQUIRES_SUDO": "True"
}):
packages = ["disabled_package", "disabled_another_package", "disabled_yet_another_package"]
runner = RunnerMultipleMock(["sudo apt-get update"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
spt.install(packages)
self.assertIn('\n'.join(packages), tools.system_pm._global_output)
self.assertEquals(0, runner.calls)
# Check enabled, default mode, system packages must be installed.
with tools.environment_append({
"CONAN_SYSREQUIRES_MODE": "EnAbLeD",
"CONAN_SYSREQUIRES_SUDO": "True"
}):
runner = RunnerMultipleMock(["sudo apt-get update"])
spt = SystemPackageTool(runner=runner, tool=AptTool())
with self.assertRaises(ConanException) as exc:
spt.install(packages)
self.assertNotIn("CONAN_SYSREQUIRES_MODE", str(exc.exception))
self.assertEquals(7, runner.calls)
def system_package_tool_installed_test(self):
if platform.system() != "Linux" and platform.system() != "Macos" and platform.system() != "Windows":
return
if platform.system() == "Windows" and not which("choco.exe"):
return
spt = SystemPackageTool()
expected_package = "git"
if platform.system() == "Windows" and which("choco.exe"):
spt = SystemPackageTool(tool=ChocolateyTool())
# Git is not installed by default on Chocolatey
expected_package = "chocolatey"
# The expected package should be installed on development/testing machines
self.assertTrue(spt._tool.installed(expected_package))
# This package hopefully doesn't exist
self.assertFalse(spt._tool.installed("oidfjgesiouhrgioeurhgielurhgaeiorhgioearhgoaeirhg"))
def msvc_build_command_test(self):
if platform.system() != "Windows":
return
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
# test build_type and arch override, for multi-config packages
cmd = tools.msvc_build_command(settings, "project.sln", build_type="Debug", arch="x86")
self.assertIn('msbuild project.sln /p:Configuration=Debug /p:Platform="x86"', cmd)
self.assertIn('vcvarsall.bat', cmd)
# tests errors if args not defined
with self.assertRaisesRegexp(ConanException, "Cannot build_sln_command"):
tools.msvc_build_command(settings, "project.sln")
settings.arch = "x86"
with self.assertRaisesRegexp(ConanException, "Cannot build_sln_command"):
tools.msvc_build_command(settings, "project.sln")
# successful definition via settings
settings.build_type = "Debug"
cmd = tools.msvc_build_command(settings, "project.sln")
self.assertIn('msbuild project.sln /p:Configuration=Debug /p:Platform="x86"', cmd)
self.assertIn('vcvarsall.bat', cmd)
def vcvars_echo_test(self):
if platform.system() != "Windows":
return
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
cmd = tools.vcvars_command(settings)
output = TestBufferConanOutput()
runner = TestRunner(output)
runner(cmd + " && set vs140comntools")
self.assertIn("vcvarsall.bat", str(output))
self.assertIn("VS140COMNTOOLS=", str(output))
with tools.environment_append({"VisualStudioVersion": "14"}):
output = TestBufferConanOutput()
runner = TestRunner(output)
cmd = tools.vcvars_command(settings)
runner(cmd + " && set vs140comntools")
self.assertNotIn("vcvarsall.bat", str(output))
self.assertIn("Conan:vcvars already set", str(output))
self.assertIn("VS140COMNTOOLS=", str(output))
def vcvars_raises_when_not_found_test(self):
text = """
os: [Windows]
compiler:
Visual Studio:
version: ["5"]
"""
settings = Settings.loads(text)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "5"
with self.assertRaisesRegexp(ConanException, "VS non-existing installation: Visual Studio 5"):
tools.vcvars_command(settings)
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def vcvars_constrained_test(self):
text = """os: [Windows]
compiler:
Visual Studio:
version: ["14"]
"""
settings = Settings.loads(text)
settings.os = "Windows"
settings.compiler = "Visual Studio"
with self.assertRaisesRegexp(ConanException,
"compiler.version setting required for vcvars not defined"):
tools.vcvars_command(settings)
new_out = StringIO()
tools.set_global_instances(ConanOutput(new_out), None)
settings.compiler.version = "14"
with tools.environment_append({"vs140comntools": "path/to/fake"}):
tools.vcvars_command(settings)
with tools.environment_append({"VisualStudioVersion": "12"}):
with self.assertRaisesRegexp(ConanException,
"Error, Visual environment already set to 12"):
tools.vcvars_command(settings)
with tools.environment_append({"VisualStudioVersion": "12"}):
# Not raising
tools.vcvars_command(settings, force=True)
def vcvars_context_manager_test(self):
conanfile = """
from conans import ConanFile, tools
class MyConan(ConanFile):
name = "MyConan"
version = "0.1"
settings = "os", "compiler"
def build(self):
with tools.vcvars(self.settings, only_diff=True):
self.output.info("VCINSTALLDIR set to: " + str(tools.get_env("VCINSTALLDIR")))
"""
client = TestClient()
client.save({"conanfile.py": conanfile})
if platform.system() == "Windows":
client.run("create . conan/testing")
self.assertNotIn("VCINSTALLDIR set to: None", client.out)
else:
client.run("create . conan/testing")
self.assertIn("VCINSTALLDIR set to: None", client.out)
@unittest.skipUnless(platform.system() == "Windows", "Requires Windows")
def vcvars_dict_diff_test(self):
text = """
os: [Windows]
compiler:
Visual Studio:
version: ["14"]
"""
settings = Settings.loads(text)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
with tools.environment_append({"MYVAR": "1"}):
ret = vcvars_dict(settings, only_diff=False)
self.assertIn("MYVAR", ret)
self.assertIn("VCINSTALLDIR", ret)
ret = vcvars_dict(settings)
self.assertNotIn("MYVAR", ret)
self.assertIn("VCINSTALLDIR", ret)
def vcvars_dict_test(self):
# https://github.com/conan-io/conan/issues/2904
output_with_newline_and_spaces = """__BEGINS__
PROCESSOR_ARCHITECTURE=AMD64
PROCESSOR_IDENTIFIER=Intel64 Family 6 Model 158 Stepping 9, GenuineIntel
PROCESSOR_LEVEL=6
PROCESSOR_REVISION=9e09
set nl=^
env_var=
without_equals_sign
ProgramFiles(x86)=C:\Program Files (x86)
""".encode("utf-8")
def vcvars_command_mock(settings, arch, compiler_version, force, vcvars_ver, winsdk_version): # @UnusedVariable
return "unused command"
def subprocess_check_output_mock(cmd, shell):
self.assertIn("unused command", cmd)
return output_with_newline_and_spaces
with mock.patch('conans.client.tools.win.vcvars_command', new=vcvars_command_mock):
with mock.patch('subprocess.check_output', new=subprocess_check_output_mock):
vars = tools.vcvars_dict(None, only_diff=False)
self.assertEqual(vars["PROCESSOR_ARCHITECTURE"], "AMD64")
self.assertEqual(vars["PROCESSOR_IDENTIFIER"], "Intel64 Family 6 Model 158 Stepping 9, GenuineIntel")
self.assertEqual(vars["PROCESSOR_LEVEL"], "6")
self.assertEqual(vars["PROCESSOR_REVISION"], "9e09")
self.assertEqual(vars["ProgramFiles(x86)"], "C:\Program Files (x86)")
def run_in_bash_test(self):
if platform.system() != "Windows":
return
class MockConanfile(object):
def __init__(self):
self.output = namedtuple("output", "info")(lambda x: None) # @UnusedVariable
self.env = {"PATH": "/path/to/somewhere"}
class MyRun(object):
def __call__(self, command, output, log_filepath=None, cwd=None, subprocess=False): # @UnusedVariable
self.command = command
self._runner = MyRun()
conanfile = MockConanfile()
tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin")
self.assertIn("bash", conanfile._runner.command)
self.assertIn("--login -c", conanfile._runner.command)
self.assertIn("^&^& a_command.bat ^", conanfile._runner.command)
with tools.environment_append({"CONAN_BASH_PATH": "path\\to\\mybash.exe"}):
tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin")
self.assertIn('path\\to\\mybash.exe --login -c', conanfile._runner.command)
with tools.environment_append({"CONAN_BASH_PATH": "path with spaces\\to\\mybash.exe"}):
tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin")
self.assertIn('"path with spaces\\to\\mybash.exe" --login -c', conanfile._runner.command)
# try to append more env vars
conanfile = MockConanfile()
tools.run_in_windows_bash(conanfile, "a_command.bat", subsystem="cygwin", env={"PATH": "/other/path",
"MYVAR": "34"})
self.assertIn('^&^& PATH=\\^"/cygdrive/other/path:/cygdrive/path/to/somewhere:$PATH\\^" '
'^&^& MYVAR=34 ^&^& a_command.bat ^', conanfile._runner.command)
def download_retries_test(self):
out = TestBufferConanOutput()
set_global_instances(out, requests)
# Connection error
with self.assertRaisesRegexp(ConanException, "HTTPConnectionPool"):
tools.download("http://fakeurl3.es/nonexists",
os.path.join(temp_folder(), "file.txt"), out=out,
retry=3, retry_wait=0)
# Not found error
self.assertEquals(str(out).count("Waiting 0 seconds to retry..."), 2)
with self.assertRaisesRegexp(NotFoundException, "Not found: "):
tools.download("https://github.com/conan-io/conan/blob/develop/FILE_NOT_FOUND.txt",
os.path.join(temp_folder(), "README.txt"), out=out,
retry=3, retry_wait=0)
# And OK
dest = os.path.join(temp_folder(), "manual.html")
tools.download("http://www.zlib.net/manual.html",
dest, out=out,
retry=3, retry_wait=0)
self.assertTrue(os.path.exists(dest))
content = load(dest)
# overwrite = False
with self.assertRaises(ConanException):
tools.download("http://www.zlib.net/manual.html",
dest, out=out,
retry=3, retry_wait=0, overwrite=False)
# overwrite = True
tools.download("http://www.zlib.net/manual.html",
dest, out=out,
retry=3, retry_wait=0, overwrite=True)
self.assertTrue(os.path.exists(dest))
content_new = load(dest)
self.assertEqual(content, content_new)
# Not authorized
with self.assertRaises(ConanException):
tools.download("https://httpbin.org/basic-auth/user/passwd", dest, overwrite=True)
# Authorized
tools.download("https://httpbin.org/basic-auth/user/passwd", dest, auth=("user", "passwd"),
overwrite=True)
# Authorized using headers
tools.download("https://httpbin.org/basic-auth/user/passwd", dest,
headers={"Authorization": "Basic dXNlcjpwYXNzd2Q="}, overwrite=True)
def get_gnu_triplet_test(self):
def get_values(this_os, this_arch, setting_os, setting_arch, compiler=None):
build = tools.get_gnu_triplet(this_os, this_arch, compiler)
host = tools.get_gnu_triplet(setting_os, setting_arch, compiler)
return build, host
build, host = get_values("Linux", "x86_64", "Linux", "armv7hf")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-gnueabihf")
build, host = get_values("Linux", "x86", "Linux", "armv7hf")
self.assertEquals(build, "x86-linux-gnu")
self.assertEquals(host, "arm-linux-gnueabihf")
build, host = get_values("Linux", "x86_64", "Linux", "x86")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "x86-linux-gnu")
build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "i686-w64-mingw32")
build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="Visual Studio")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "i686-windows-msvc") # Not very common but exists sometimes
build, host = get_values("Linux", "x86_64", "Linux", "armv7hf")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-gnueabihf")
build, host = get_values("Linux", "x86_64", "Linux", "armv7")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-gnueabi")
build, host = get_values("Linux", "x86_64", "Linux", "armv6")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-gnueabi")
build, host = get_values("Linux", "x86_64", "Android", "x86")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "i686-linux-android")
build, host = get_values("Linux", "x86_64", "Android", "x86_64")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "x86_64-linux-android")
build, host = get_values("Linux", "x86_64", "Android", "armv7")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-androideabi")
build, host = get_values("Linux", "x86_64", "Android", "armv7hf")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-androideabi")
build, host = get_values("Linux", "x86_64", "Android", "armv8")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "aarch64-linux-android")
build, host = get_values("Linux", "x86_64", "Android", "armv6")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "arm-linux-androideabi")
build, host = get_values("Linux", "x86_64", "Windows", "x86", compiler="gcc")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "i686-w64-mingw32")
build, host = get_values("Linux", "x86_64", "Windows", "x86_64", compiler="gcc")
self.assertEquals(build, "x86_64-linux-gnu")
self.assertEquals(host, "x86_64-w64-mingw32")
build, host = get_values("Windows", "x86_64", "Windows", "x86", compiler="gcc")
self.assertEquals(build, "x86_64-w64-mingw32")
self.assertEquals(host, "i686-w64-mingw32")
build, host = get_values("Windows", "x86_64", "Linux", "armv7hf", compiler="gcc")
self.assertEquals(build, "x86_64-w64-mingw32")
self.assertEquals(host, "arm-linux-gnueabihf")
build, host = get_values("Darwin", "x86_64", "Android", "armv7hf")
self.assertEquals(build, "x86_64-apple-darwin")
self.assertEquals(host, "arm-linux-androideabi")
build, host = get_values("Darwin", "x86_64", "Macos", "x86")
self.assertEquals(build, "x86_64-apple-darwin")
self.assertEquals(host, "i686-apple-darwin")
build, host = get_values("Darwin", "x86_64", "iOS", "armv7")
self.assertEquals(build, "x86_64-apple-darwin")
self.assertEquals(host, "arm-apple-darwin")
build, host = get_values("Darwin", "x86_64", "watchOS", "armv7k")
self.assertEquals(build, "x86_64-apple-darwin")
self.assertEquals(host, "arm-apple-darwin")
build, host = get_values("Darwin", "x86_64", "tvOS", "armv8")
self.assertEquals(build, "x86_64-apple-darwin")
self.assertEquals(host, "aarch64-apple-darwin")
for os in ["Windows", "Linux"]:
for arch in ["x86_64", "x86"]:
triplet = tools.get_gnu_triplet(os, arch, "gcc")
output = ""
if arch == "x86_64":
output += "x86_64"
else:
output += "i686" if os != "Linux" else "x86"
output += "-"
if os == "Windows":
output += "w64-mingw32"
else:
output += "linux-gnu"
self.assertIn(output, triplet)
# Compiler not specified for os="Windows"
with self.assertRaises(ConanException):
tools.get_gnu_triplet("Windows", "x86")
def detect_windows_subsystem_test(self):
# Should not raise
result = tools.os_info.detect_windows_subsystem()
if not tools.os_info.bash_path or platform.system() != "Windows":
self.assertEqual(None, result)
else:
self.assertEqual(str, type(result))
class GitToolTest(unittest.TestCase):
def test_clone_git(self):
path, _ = create_local_git_repo({"myfile": "contents"})
tmp = temp_folder()
git = Git(tmp)
git.clone(path)
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
def test_clone_existing_folder_git(self):
path, commit = create_local_git_repo({"myfile": "contents"}, branch="my_release")
tmp = temp_folder()
save(os.path.join(tmp, "file"), "dummy contents")
git = Git(tmp)
git.clone(path, branch="my_release")
self.assertTrue(os.path.exists(os.path.join(tmp, "myfile")))
# Checkout a commit
git.checkout(commit)
self.assertEquals(git.get_revision(), commit)
def test_clone_existing_folder_without_branch(self):
tmp = temp_folder()
save(os.path.join(tmp, "file"), "dummy contents")
git = Git(tmp)
with self.assertRaisesRegexp(ConanException, "The destination folder is not empty, "
"specify a branch to checkout"):
git.clone("https://github.com/conan-community/conan-zlib.git")
def test_credentials(self):
tmp = temp_folder()
git = Git(tmp, username="peter", password="otool")
url_credentials = git.get_url_with_credentials("https://some.url.com")
self.assertEquals(url_credentials, "https://peter:[email protected]")
def test_verify_ssl(self):
class MyRunner(object):
def __init__(self):
self.calls = []
def __call__(self, *args, **kwargs):
self.calls.append(args[0])
return ""
runner = MyRunner()
tmp = temp_folder()
git = Git(tmp, username="peter", password="otool", verify_ssl=True, runner=runner,
force_english=True)
git.clone(url="https://myrepo.git")
self.assertIn("git config http.sslVerify true", runner.calls[1])
runner = MyRunner()
git = Git(tmp, username="peter", password="otool", verify_ssl=False, runner=runner,
force_english=False)
git.clone(url="https://myrepo.git")
self.assertIn("git config http.sslVerify false", runner.calls[1])
def git_helper_in_recipe_test(self):
client = TestClient()
git_repo = temp_folder()
save(os.path.join(git_repo, "file.h"), "contents")
client.runner("git init .", cwd=git_repo)
client.runner('git config user.email "[email protected]"', cwd=git_repo)
client.runner('git config user.name "Your Name"', cwd=git_repo)
client.runner("git checkout -b dev", cwd=git_repo)
client.runner("git add .", cwd=git_repo)
client.runner('git commit -m "comm"', cwd=git_repo)
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
git = tools.Git()
git.clone("%s", "dev")
def build(self):
assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel")
# Now clone in a subfolder with later checkout
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
tools.mkdir("src")
git = tools.Git("./src")
git.clone("%s")
git.checkout("dev")
def build(self):
assert(os.path.exists(os.path.join("src", "file.h")))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel")
# Base dir, with exports without subfolder and not specifying checkout fails
conanfile = """
import os
from conans import ConanFile, tools
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "other"
def source(self):
git = tools.Git()
git.clone("%s")
def build(self):
assert(os.path.exists("file.h"))
""" % git_repo.replace("\\", "/")
client.save({"conanfile.py": conanfile, "other": "hello"})
client.run("create . user/channel", ignore_error=True)
self.assertIn("The destination folder is not empty, "
"specify a branch to checkout", client.out)
| [] | [] | ["Z", "B", "A"] | [] | ["Z", "B", "A"] | python | 3 | 0 |
scrapers.py | #!/usr/bin/env python3
import os
import sys
import time
from datetime import datetime, date, timezone
import urllib.parse
import requests
from bs4 import BeautifulSoup
from nameparser import HumanName
from airtable import Airtable
import standardize
airtab = Airtable(os.environ['jail_scrapers_db'], 'intakes',
os.environ['AIRTABLE_API_KEY'])
airtab_log = Airtable(os.environ['log_db'], 'log', os.environ['AIRTABLE_API_KEY'])
muh_headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
def get_name(raw_name, this_dict):
name = HumanName(raw_name)
name.capitalize()
this_dict['first_name'] = name.first
this_dict['last_name'] = name.last
this_dict['middle_name'] = name.middle
this_dict['suffix'] = name.suffix
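# Shared helper: when the freshly scraped recent_text differs from what is
# already in Airtable, the record is flagged as updated, the new HTML snapshot
# is stored, the arresting agency is optionally re-parsed, and the existing
# row is patched in place.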
def update_record(this_dict, soup, m, lea_parser=None, raw_lea=''):
if this_dict['recent_text'] != m['fields']['recent_text']:
this_dict['updated'] = True
this_dict['html'] = soup.prettify()
if lea_parser:
lea_parser(raw_lea)
airtab.update(m['id'], this_dict, typecast=True)
def wrap_it_up(jail, t0, new_intakes, total_intakes):
this_dict = {'module': 'jail_scrapers/scrapers.py'}
this_dict['function'] = f"{jail}_scraper"
this_dict['duration'] = round(time.time() - t0, 2)
this_dict['total'] = total_intakes
this_dict['new'] = new_intakes
airtab_log.insert(this_dict, typecast=True)
def damn_it(error_message):
print('Another fucking "Connection Error."\n', error_message)
time.sleep(10)
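# Each *_scraper() below follows the same general pattern: fetch the jail's
# public roster, parse each detainee page with BeautifulSoup, match the record
# in Airtable by booking/intake number, insert a new row (with mugshot
# attachment) or update the existing one, then log the run's totals via
# wrap_it_up().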
def mcdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
root_url = 'http://mydcstraining.com/agencyinfo/MS/4360/inmate/'
main_url = root_url + 'ICURRENT.HTM'
page = requests.get(main_url, headers=muh_headers)
soup = BeautifulSoup(page.text, 'html.parser')
dk_rows = soup.find_all('tr')
for dk_row in dk_rows:
cells = dk_row.find_all('td')
if len(cells) == 9:
total_intakes += 1
time.sleep(0.2)
this_dict = {'jail': 'mcdc', 'linking': ['rechGV1KWD0TWQPqv']}
this_dict['link'] = root_url + dk_row.a.get('href')
try:
r = requests.get(this_dict['link'], headers=muh_headers)
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['charge_1'] = cells[8].string.replace('\xa0', '')
if this_dict['charge_1'] == '18USC132518USC1325 ILLEGAL ENTRY-ALIEN':
this_dict['charge_1_statute'] = '8 U.S.C. 1325'
this_dict['charge_1_title'] = 'UNLAWFUL ENTRY'
this_dict['bk'] = cells[2].string.replace('\xa0', '')
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
this_dict['img_src'] = (
this_dict['link'].replace('ICUD', 'ICUP').replace('HTM', 'jpg')
)
data = []
soup = BeautifulSoup(r.text, 'html.parser')
for string in soup.stripped_strings:
data.append(str(string))
try:
this_dict['intake_number'] = data[1 + data.index('INTAKE #:')]
this_dict['DOI'] = f"{data[1 + data.index('INTAKE DATE:')]} {data[1 + data.index('TIME:')]}"
get_name(data[1 + data.index('Name:')], this_dict)
this_dict['DOB'] = data[1 + data.index('DOB:')]
this_dict['intake_age'] = int(data[1 + data.index('AGE:')])
this_dict['race'] = standardize.mcdc_race(raw_race=data[1 + data.index('RACE:')])
this_dict['sex'] = data[1 + data.index('SEX:')]
if data[1 + data.index('OFF DATE:')] != '00/00/0000':
this_dict['DOO'] = data[1 + data.index('OFF DATE:')]
this_dict['intake_case_number'] = data[1 + data.index('- Case#:')]
this_dict['intake_bond_written'] = repr(
data[1 + data.index('WRITTEN BOND:')]
).replace('\xa0', ' ')
this_dict['intake_bond_cash'] = repr(
data[1 + data.index('CASH BOND:')]
).replace('\xa0', ' ')
blocks = soup.find_all('table')
rows = blocks[9].find_all('tr')
charges = []
courts = []
bond_ammounts = []
for row in rows[1:]:
cells = row.find_all('td')
if len(cells) == 3:
charge_raw = cells[0].string.strip()
if ', ' in charge_raw:
charge = f"\"{charge_raw}\""
else:
charge = charge_raw
charges.append(charge)
court_raw = cells[1].string.strip()
if court_raw == 'OTHER COUR':
courts.append('OTHER COURT')
else:
courts.append(court_raw)
if cells[2].string:
amt = '$' + cells[2].string.strip()
bond_ammounts.append(amt)
this_dict['charges'] = ', '.join(charges)
this_dict['courts'] = ', '.join(courts)
this_dict['bond_ammounts'] = '\n'.join(bond_ammounts)
this_dict['recent_text'] = '\n'.join(data[data.index('Name:'):])
raw_lea = data[1 + data.index('ARRESTING AGENCY:')]
m = airtab.match('intake_number', this_dict['intake_number'], view='mcdc', fields='recent_text')
if not m:
this_dict['LEA'] = standardize.mcdc_lea(raw_lea)
this_dict['html'] = soup.prettify()
attachments_array = []
image_url = {'url': this_dict['img_src']}
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m, lea_parser=standardize.mcdc_lea, raw_lea=raw_lea)
except ValueError:
print('there was a value error for', this_dict['bk'])
wrap_it_up('mcdc', t0, new_intakes, total_intakes)
def prcdf_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
main_url = 'http://mydcstraining.com/agencyinfo/MS/0055/inmate/ICURRENT.HTM'
page = requests.get(main_url, headers=muh_headers)
soup = BeautifulSoup(page.text, 'html.parser')
dk_rows = soup.find_all('tr')
for dk_row in dk_rows:
time.sleep(0.2)
cells = dk_row.find_all('td')
if len(cells) == 9:
total_intakes += 1
this_dict = {'jail': 'prcdf', 'linking': ['recqGceWKASe4gYEW']}
this_dict['link'] = urllib.parse.urljoin(main_url, dk_row.a.get('href'))
try:
r = requests.get(this_dict['link'], headers=muh_headers)
except requests.ConnectionError as err:
damn_it(err)
continue
charge_1 = cells[8].string.replace('\xa0', '')
if 'ã' in charge_1:
this_dict['charge_1'] = charge_1[0: charge_1.find('ã')]
else:
this_dict['charge_1'] = charge_1
if charge_1 == '18USC132518USC1325 ILLEGAL ENTRY-ALIEN':
this_dict['charge_1_statute'] = '8 U.S.C. 1325'
this_dict['charge_1_title'] = 'UNLAWFUL ENTRY'
this_dict['bk'] = cells[2].string.replace('\xa0', '')
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
this_dict['img_src'] = (
this_dict['link'].replace('ICUD', 'ICUP').replace('HTM', 'jpg')
)
data = []
soup = BeautifulSoup(r.text, 'html.parser')
for string in soup.stripped_strings:
data.append(str(string))
this_dict['intake_number'] = data[1 + data.index('INTAKE #:')]
this_dict['DOI'] = f"{data[1 + data.index('INTAKE DATE:')]} {data[1 + data.index('TIME:')]}"
get_name(data[1 + data.index('Name:')], this_dict)
this_dict['DOB'] = data[1 + data.index('DOB:')]
this_dict['intake_age'] = int(data[1 + data.index('AGE:')])
this_dict['race'] = standardize.prcdf_race(raw_race=data[1 + data.index('RACE:')])
this_dict['sex'] = data[1 + data.index('SEX:')]
if data[1 + data.index('OFF DATE:')] != '00/00/0000':
this_dict['DOO'] = data[1 + data.index('OFF DATE:')]
this_dict['intake_case_number'] = data[1 + data.index('- Case#:')]
this_dict['intake_bond_written'] = repr(
data[1 + data.index('WRITTEN BOND:')]
).replace('\xa0', ' ')
this_dict['intake_bond_cash'] = repr(
data[1 + data.index('CASH BOND:')]
).replace('\xa0', ' ')
blocks = soup.find_all('table')
rows = blocks[9].find_all('tr')
charges = []
courts = []
bond_ammounts = []
for row in rows[1:]:
cells = row.find_all('td')
if len(cells) == 3:
charge_raw = cells[0].string.strip()
court_raw = cells[1].string.strip()
if 'ã' in charge_raw:
charge = charge_raw[0: charge_raw.find('ã')]
else:
charge = charge_raw
if ', ' in charge:
charges.append(f"\"{charge}\"")
else:
charges.append(charge)
if ' C' in court_raw:
courts.append(court_raw[: court_raw.find(' C')])
elif court_raw == 'OTHER AGEN':
courts.append('OTHER AGENCY')
else:
courts.append(court_raw)
if cells[2].string:
amt = '$' + cells[2].string.strip()
bond_ammounts.append(amt)
this_dict['charges'] = ', '.join(charges)
this_dict['courts'] = ', '.join(courts)
this_dict['bond_ammounts'] = '\n'.join(bond_ammounts)
this_dict['recent_text'] = '\n'.join(data[data.index('Name:'):])
raw_lea = data[1 + data.index('ARRESTING AGENCY:')]
m = airtab.match(
'intake_number',
this_dict['intake_number'],
view='prcdf',
fields='recent_text',
)
if not m:
this_dict['LEA'] = standardize.prcdf_lea(raw_lea)
this_dict['html'] = soup.prettify()
attachments_array = []
image_url = {'url': this_dict['img_src']}
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m, standardize.prcdf_lea, raw_lea)
wrap_it_up('prcdf', t0, new_intakes, total_intakes)
def lcdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
root_url = 'https://tcsi-roster.azurewebsites.net/'
main_url = (root_url + 'Default.aspx?i=26&code=Lee&type=roster')
r = requests.get(main_url)
urls = set()
soup = BeautifulSoup(r.text, 'html.parser')
for link in soup.find_all('a'):
url = link.get('href')
if url[:10] == 'InmateInfo':
urls.add(url)
total_intakes = len(urls)
for url in urls:
this_dict = {'jail': 'lcdc', 'linking': ['rec44JJsg4vJMkUhI']}
this_dict['link'] = root_url + url
try:
r = requests.get(this_dict['link'])
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
this_dict['bk'] = url[-6:]
soup = BeautifulSoup(r.text, 'html.parser')
raw_intake_number = soup.find(id='lblBookingNumber').string
if len(raw_intake_number) == 1:
this_dict['intake_number'] = f"{this_dict['bk']}_0{raw_intake_number}"
else:
this_dict['intake_number'] = f"{this_dict['bk']}_{raw_intake_number}"
data = []
for string in soup.stripped_strings:
data.append(string)
text_rn_start = data.index('Booking #') - 1
this_dict['recent_text'] = '\n'.join(data[text_rn_start: len(data) - 1])
raw_lea = soup.find(id='lblArrestingAgency').string
m = airtab.match('intake_number', this_dict['intake_number'], view='lcdc', fields='recent_text')
if not m:
this_dict['html'] = soup.prettify()
if soup.find(id='lblBookingDate').string:
this_dict['DOI'] = soup.find(id='lblBookingDate').string
this_dict['LEA'] = standardize.lcdc_lea(raw_lea)
this_dict['race'] = standardize.lcdc_race(raw_race=soup.find(id='lblRace').string)
get_name(soup.find(id='lblInmateName').string, this_dict)
this_dict['DOB'] = soup.find(id='lblBirthdate').string
this_dict['intake_age'] = int(soup.find(id='lblAge').string)
if soup.find(id='lblSex').string:
this_dict['sex'] = soup.find(id='lblSex').string[:1]
this_dict['glasses'] = soup.find(id='lblGlasses').string
this_dict['charge_1'] = soup.find(id='lblOffense').string
SDOR = soup.find(id='lblScheduleReleaseDate').string
if SDOR != 'N/A':
this_dict['SDOR'] = SDOR
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m, standardize.lcdc_lea, raw_lea)
wrap_it_up('lcdc', t0, new_intakes, total_intakes)
def tcdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
url = 'https://www.tunicamssheriff.com/roster.php?grp=10'
docket_pages = set()
docket_pages.add(url)
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for x in soup.find_all('a'):
y = x.get('href')
if y.startswith('roster.php?grp='):
page = urllib.parse.urljoin(url, y)
docket_pages.add(page)
intakes = []
for page in docket_pages:
r = requests.get(page)
soup = BeautifulSoup(r.text, 'html.parser')
for x in soup.find_all('a'):
link = x.get('href')
if link:
if link.startswith('roster_view.php?booking_num'):
intakes.append(link)
total_intakes = len(intakes)
for x in intakes:
this_dict = {'jail': 'tcdc', 'linking': ['rec9ach7LRF8DcIuM']}
this_dict['bk'] = x[-10:]
this_dict['link'] = f"https://www.tunicamssheriff.com/roster_view.php?booking_num={this_dict['bk']}"
try:
r = requests.get(this_dict['link'])
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
soup = BeautifulSoup(r.text, 'html.parser')
data = []
for string in soup.stripped_strings:
data.append(str(string))
text_rn_start = data.index('Booking #:') - 1
messy_text_rn = '\n'.join(data[text_rn_start:]).strip()
this_dict['recent_text'] = messy_text_rn[
0: messy_text_rn.find('Note: ')
].strip()
try:
raw_lea = data[1 + data.index('Arresting Agency:')]
except ValueError:
raw_lea = ''
m = airtab.match('bk', this_dict['bk'], view='tcdc')
if not m:
this_dict['html'] = soup.prettify()
get_name(data[data.index('Booking #:') - 1], this_dict)
if 'Age:' in data:
this_dict['intake_age'] = int(data[1 + data.index('Age:')])
this_dict['sex'] = data[1 + data.index('Gender:')]
if data[1 + data.index('Race:')] != 'Arresting Agency:':
this_dict['race'] = data[1 + data.index('Race:')]
if raw_lea:
this_dict['LEA'] = standardize.tcdc_lea(raw_lea)
this_dict['DOI'] = datetime.strptime(data[1 + data.index('Booking Date:')],
'%m-%d-%Y - %I:%M %p').strftime('%m/%d/%Y %H:%M')
c = data[1 + data.index('Charges:')]
if c.startswith('Note:') or c.startswith('Bond:'):
this_dict['charge_1'] = ''
else:
this_dict['charge_1'] = c
if 'Bond:' in data:
this_dict['intake_bond_cash'] = data[1 + data.index('Bond:')]
this_dict[
'img_src'] = f"https://www.tunicamssheriff.com/templates/tunicamssheriff.com/images/inmates/{this_dict['bk']}.jpg"
image_url = {'url': this_dict['img_src']}
attachments_array = []
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m, standardize.tcdc_lea, raw_lea)
wrap_it_up('tcdc', t0, new_intakes, total_intakes)
def kcdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
docket_pages = set()
docket_pages.add('roster.php?grp=10')
r = requests.get('https://www.kempercountysheriff.com/roster.php?grp=10')
soup = BeautifulSoup(r.text, 'html.parser').find(id='intContentContainer')
for x in soup.find_all('a'):
y = x.get('href')
if y.startswith('roster.php?grp='):
docket_pages.add(y)
for page in docket_pages:
page_url = f"https://www.kempercountysheriff.com/{page}"
r = requests.get(page_url)
soup = BeautifulSoup(r.text, 'html.parser').find_all(class_='inmateTable')
for inmate_block in soup:
x = inmate_block.find('a').get('href')
total_intakes += 1
this_dict = {'jail': 'kcdc', 'linking': ['recb9EZbIAZmUBofc']}
this_dict['link'] = f"https://www.kempercountysheriff.com/{x}"
try:
r = requests.get(this_dict['link'])
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['bk'] = x.replace('roster_view.php?booking_num=', '')
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
soup = BeautifulSoup(r.text, 'html.parser').find(id='intContentContainer')
data = []
for string in soup.stripped_strings:
data.append(str(string))
messy_text_rn = '\n'.join(data)
this_dict['recent_text'] = messy_text_rn[0: messy_text_rn.find('Note: ')].strip()
m = airtab.match('bk', this_dict['bk'], view='kcdc', fields='recent_text')
if not m:
this_dict['html'] = soup.prettify()
get_name(data[0], this_dict)
if 'Age:' in data:
this_dict['intake_age'] = int(data[1 + data.index('Age:')])
this_dict['sex'] = data[1 + data.index('Gender:')][:1]
if 'Race:' in data:
this_dict['race'] = standardize.kcdc_race(raw_race=data[1 + data.index('Race:')])
this_dict['DOI'] = datetime.strptime(
data[1 + data.index('Booking Date:')], '%m-%d-%Y - %I:%M %p').strftime('%m/%d/%Y %I:%M%p')
c = data[1 + data.index('Charges:')]
if c.startswith('Note:'):
this_dict['charge_1'] = ''
else:
this_dict['charge_1'] = c
if 'Bond:' in data:
this_dict['intake_bond_cash'] = data[1 + data.index('Bond:')]
for x in soup.find_all('img'):
img_src = x.get('src')
if img_src.startswith('templates/kempercountysheriff.com/images/inmates'):
this_dict['img_src'] = f"https://www.kempercountysheriff.com/{img_src}"
try:
image_url = {'url': this_dict['img_src']}
attachments_array = []
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
except KeyError as err:
print('no image url: ', err)
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m)
wrap_it_up('kcdc', t0, new_intakes, total_intakes)
def hcdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
main_url = 'http://www.co.hinds.ms.us/pgs/apps/inmate/inmate_list.asp'
try:
r = requests.get(main_url)
except requests.ConnectionError as err:
damn_it(err)
return
soup = BeautifulSoup(r.text, 'html.parser')
total_pages = int(soup.h3.string.split()[3])
pages = list(range(1, total_pages + 1))
for page in pages:
param_str = f"name_sch=Date&SS1=1&ScrollAction=Page+{page}"
r = requests.get(f"{main_url}?{param_str}")
soup = BeautifulSoup(r.text, 'html.parser')
rows = soup.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) == 7:
total_intakes += 1
this_dict = {'jail': 'hcdc', 'linking': ['recJLBoeZlp4IYn4I']}
this_dict['bk'] = row.a.get('href').replace('inmate_detail.asp?ID=', '')
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
m = airtab.match('bk', this_dict['bk'], view='hcdc', fields='recent_text')
if m:
airtab.update(m['id'], this_dict)
else:
this_dict['link'] = urllib.parse.urljoin(main_url, row.a.get('href'))
try:
r = requests.get(this_dict['link'])
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['img_src'] = f"http://www.co.hinds.ms.us/pgs/inmatephotos/{this_dict['bk']}.jpg"
this_dict['PHOTO'] = []
image_url = {'url': this_dict['img_src']}
this_dict['PHOTO'].append(image_url)
data = []
soup = BeautifulSoup(r.text, 'html.parser')
this_dict['html'] = soup.find_all('table')[1].prettify()
for string in soup.stripped_strings:
data.append(string)
try:
this_dict['recent_text'] = '\n'.join(data[data.index('Name'): data.index('Disclaimer:')])
except ValueError:
this_dict['recent_text'] = ''
try:
get_name(data[1 + data.index('Name')], this_dict)
this_dict['intake_address_line_1'] = data[1 + data.index('Address')]
this_dict['intake_address_line_2'] = data[2 + data.index('Address')]
this_dict['DOB'] = data[1 + data.index('Date of Birth')]
this_dict['sex'] = data[1 + data.index('Sex')]
if data[1 + data.index('Race')] != 'Height':
this_dict['race'] = data[1 + data.index('Race')]
raw_doi = data[1 + data.index('Arrest Date')]
if raw_doi == date.today().strftime('%m/%d/%Y'):
this_dict['DOI'] = datetime.now().strftime('%m/%d/%Y %I:%M%p')
else:
this_dict['DOI'] = f"{raw_doi} 11:59pm"
raw_lea = data[1 + data.index('Arresting Agency')]
this_dict['LEA'] = standardize.hcdc_lea(raw_lea)
this_dict['charge_1'] = data[1 + data.index('Charge 1')]
airtab.insert(this_dict, typecast=True)
new_intakes += 1
except ValueError as err:
print(err, '\n', this_dict['link'])
wrap_it_up('hcdc', t0, new_intakes, total_intakes)
def ccdc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
url = 'http://www.claysheriffms.org/roster.php?grp=10'
docket_pages = set()
docket_pages.add(url)
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for x in soup.find_all('a'):
y = x.get('href')
if y.startswith('roster.php?grp='):
page = urllib.parse.urljoin(url, y)
docket_pages.add(page)
intakes = []
for page in docket_pages:
r = requests.get(page)
soup = BeautifulSoup(r.text, 'html.parser')
for x in soup.find_all('a'):
link = x.get('href')
if link:
if link.startswith('roster_view.php?booking_num'):
intakes.append(link)
total_intakes = len(intakes)
for x in intakes:
this_dict = {'jail': 'ccdc', 'linking': ['rec9YYU7UkQAwqxZH']}
this_dict['bk'] = x.partition('=')[2]
this_dict['link'] = f"http://www.claysheriffms.org/roster_view.php?booking_num={this_dict['bk']}"
try:
r = requests.get(this_dict['link'])
except requests.ConnectionError as err:
damn_it(err)
continue
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
soup = BeautifulSoup(r.text, 'html.parser').find_all('table')[6]
data = []
for string in soup.stripped_strings:
data.append(string)
messy_text_rn = '\n'.join(data)
this_dict['recent_text'] = messy_text_rn[
0: messy_text_rn.find('Note: ')
].strip()
this_dict['html'] = soup.prettify()
get_name(data[data.index('Booking #:') - 1], this_dict)
this_dict['intake_age'] = int(data[1 + data.index('Age:')])
this_dict['sex'] = data[1 + data.index('Gender:')][:1]
this_dict['race'] = standardize.ccdc_race(raw_race=data[1 + data.index('Race:')])
this_dict['DOI'] = datetime.strptime(data[1 + data.index('Booking Date:')],
'%m-%d-%Y - %I:%M %p').strftime('%m/%d/%Y %I:%M%p')
c = data[1 + data.index('Charges:')]
if c.startswith('Note:') or c.startswith('Bond:'):
this_dict['charge_1'] = ''
else:
this_dict['charge_1'] = c
if 'Bond:' in data:
this_dict['intake_bond_cash'] = data[1 + data.index('Bond:')]
raw_lea = data[1 + data.index('Arresting Agency:')]
m = airtab.match('bk', this_dict['bk'], view='ccdc', fields='recent_text')
if not m:
this_dict['LEA'] = standardize.ccdc_lea(raw_lea)
this_dict[
'img_src'] = f"http://www.claysheriffms.org/templates/claysheriffms.org/images/inmates/{this_dict['bk']}.jpg"
image_url = {'url': this_dict['img_src']}
attachments_array = []
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
update_record(this_dict, soup, m, standardize.ccdc_lea, raw_lea)
wrap_it_up('ccdc', t0, new_intakes, total_intakes)
def acdc_scraper():
t0 = time.time()
intakes = []
new_intakes, total_intakes = 0, 0
docket_pages = ['http://www.adamscosheriff.org/inmate-roster/']
r = requests.get(docket_pages[0])
soup = BeautifulSoup(r.text, 'html.parser')
x = soup.find_all('a', class_='page-numbers')
page_numbers = range(2, int(x[len(x) - 2].string) + 1)
for n in page_numbers:
url = f"http://www.adamscosheriff.org/inmate-roster/page/{n}/"
docket_pages.append(url)
for page in docket_pages:
r = requests.get(page)
soup = BeautifulSoup(r.text, 'html.parser')
for x in soup.find_all('p', class_='profile-link'):
link = x.a.get('href')
intakes.append(link)
total_intakes = len(intakes)
for intake in intakes:
this_dict = {'jail': 'acdc', 'linking': ['recbrTa5GxR01ySI3']}
data = []
this_dict['link'] = intake
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
try:
r = requests.get(intake)
except requests.ConnectionError as err:
damn_it(err)
continue
soup = BeautifulSoup(r.text, 'html.parser').find('div', class_='blog-content-container')
for string in soup.stripped_strings:
data.append(string)
this_dict['recent_text'] = '\n'.join(data)
this_dict['html'] = soup.prettify()
this_dict['bk'] = data[1 + data.index('Booking Number:')]
if ':' not in data[1 + data.index('Full Name:')]:
get_name(data[1 + data.index('Full Name:')], this_dict)
if ':' not in data[1 + data.index('Age:')]:
this_dict['intake_age'] = int(data[1 + data.index('Age:')])
if ':' not in data[1 + data.index('Gender:')]:
this_dict['sex'] = data[1 + data.index('Gender:')]
if ':' not in data[1 + data.index('Race:')]:
this_dict['race'] = data[1 + data.index('Race:')]
raw_doi = data[1 + data.index('Booking Date:')]
if raw_doi == date.today().strftime('%m/%d/%Y'):
this_dict['DOI'] = datetime.now().strftime('%m/%d/%Y %I:%M%p')
else:
this_dict['DOI'] = f"{raw_doi} 11:59pm"
charges = data[data.index('Charges:'): data.index('Bond:')]
if len(charges) > 1:
this_dict['charges'] = ', '.join(charges[1:])
this_dict['charge_1'] = charges[1]
if data[-1] != 'Bond:':
this_dict['intake_bond_cash'] = data[1 + data.index('Bond:')]
m = airtab.match('bk', this_dict['bk'], view='acdc')
if not m:
if soup.img:
this_dict['img_src'] = soup.img.get('src')
image_url = {'url': this_dict['img_src']}
attachments_array = []
attachments_array.append(image_url)
this_dict['PHOTO'] = attachments_array
else:
print('problem w/ mugshot')
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
if 'bk' in this_dict:
update_record(this_dict, soup, m)
wrap_it_up('acdc', t0, new_intakes, total_intakes)
def jcj_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
urls = [
'http://jasperso.com/inmate-roster/',
'http://jasperso.com/48-hour-release/',
]
for url in urls:
r = requests.get(url, headers=muh_headers)
soup = BeautifulSoup(r.text, 'html.parser').find('div', id='primary')
intakes = soup.find_all('div', class_='col-sm-4 inmate')
total_intakes += len(intakes)
for x in intakes:
this_dict = {'jail': 'jcj', 'linking': ['recwzuzsimZuPpVR5']}
get_name(x.h1.string.strip(), this_dict)
this_dict['link'] = url
data = []
for string in x.stripped_strings:
data.append(str(string))
this_dict['intake_number'] = data[1 + data.index('Arrest #:')]
this_dict['bk'] = data[1 + data.index('Arrest #:')]
raw_doi = data[1 + data.index('Arrest Date:')]
if raw_doi == date.today().strftime('%m/%d/%Y'):
this_dict['DOI'] = datetime.now().strftime('%m/%d/%Y %I:%M%p')
else:
this_dict['DOI'] = f"{raw_doi} 11:59pm"
if 'Release Date:' in data:
raw_dor = data[1 + data.index('Release Date:')]
if raw_dor == date.today().strftime('%m/%d/%Y'):
this_dict['DOR'] = datetime.now().strftime('%m/%d/%Y %I:%M%p')
else:
this_dict['DOR'] = f"{raw_dor} 12:01am"
this_dict['sex'] = data[1 + data.index('Gender:')].strip()[0:1]
this_dict['race'] = data[1 + data.index('Race:')].strip()[0:1]
this_dict['intake_age'] = int(data[1 + data.index('Age:')])
cleaned_charges = []
charges = data[1 + data.index('Charges:'):]
for charge in charges:
if ', ' in charge:
cleaned_charge = f"\"{charge}\""
else:
cleaned_charge = charge
cleaned_charges.append(cleaned_charge)
this_dict['charges'] = ', '.join(cleaned_charges)
this_dict['recent_text'] = '\n'.join(data)
this_dict['html'] = x.prettify()
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
raw_lea = data[1 + data.index('Arrest Agency:')]
m = airtab.match('bk', this_dict['bk'], view='jcj')
if not m:
this_dict['img_src'] = x.find('img').get('src')
this_dict['PHOTO'] = [{'url': this_dict['img_src']}]
this_dict['LEA'] = standardize.jcj_lea(raw_lea)
airtab.insert(this_dict, typecast=True)
new_intakes += 1
else:
if this_dict['recent_text'] != m['fields']['recent_text']:
this_dict['updated'] = True
this_dict['LEA'] = standardize.jcj_lea(raw_lea)
else:
pass
airtab.update(m['id'], this_dict, typecast=True)
time.sleep(0.2)
wrap_it_up('jcj', t0, new_intakes, total_intakes)
def jcadc_scraper():
t0, new_intakes, total_intakes = time.time(), 0, 0
root = 'https://services.co.jackson.ms.us/jaildocket'
r = requests.post(f"{root}/_inmateList.php?Function=count", headers=muh_headers)
total_intakes = r.json()
last_page = (total_intakes + 14) // 15  # round up so a final partial page is still fetched
pages = range(1, last_page + 1)
for pg in pages:
r = requests.post(
f"{root}/_inmateList.php?Function=list&Page={pg}&Order=BookDesc&Search=0",
headers=muh_headers)
intakes = r.json()
for intake in intakes:
data = []
this_dict = {'jail': 'jcadc', 'linking': ['recwShIgdZDcf4ZcJ']}
this_dict['bk'] = intake["Book_Number"]
this_dict['last_verified'] = (datetime.utcnow().replace(tzinfo=timezone.utc).strftime('%m/%d/%Y %H:%M'))
this_dict['intake_number'] = intake["ID_Number"].strip()
this_dict['link'] = f"{root}/inmate/_inmatedetails.php?id={this_dict['intake_number']}"
r = requests.get(this_dict['link'], headers=muh_headers)
soup = BeautifulSoup(r.text, 'html.parser')
for string in soup.stripped_strings:
data.append(string)
this_dict['recent_text'] = '\n'.join(data[1:])
m = airtab.match('bk', this_dict['bk'])
if m:
airtab.update(m['id'], this_dict, typecast=True)
else:
raw_name = f"{intake['Name_First_MI']} {intake['Name_Middle']} {intake['Name_Last']} {intake['Name_Suffix']}"
get_name(raw_name, this_dict)
raw_doi = intake["BookDate"]
if raw_doi == date.today().strftime('%m/%d/%Y'):
this_dict['DOI'] = datetime.now().strftime('%m/%d/%Y %I:%M%p')
else:
this_dict['DOI'] = f"{raw_doi} 11:59pm"
this_dict['DOA'] = intake["ArrestDate"]
this_dict['LEA'] = standardize.jcadc_lea(intake["Arrest_Agency"])
articles = soup.find_all('article')
this_dict['html'] = f"<html>\n<body>\n{articles[0].prettify()}\n{articles[1].prettify()}\n</body>\n</html>"
this_dict['img_src'] = f"{root}/inmate/{this_dict['intake_number']}.jpg"
this_dict['PHOTO'] = [{'url': this_dict['img_src']}]
airtab.insert(this_dict, typecast=True)
new_intakes += 1
wrap_it_up('jcadc', t0, new_intakes, total_intakes)
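# Command-line entry point: sys.argv[1] is 'all' or a comma-separated list of
# jail keys from fndict; an optional sys.argv[2] is a pause (in seconds)
# between scrapers. Hypothetical invocation, for illustration only:
#   python scrapers.py mcdc,prcdf 30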
def main():
fndict = {
'mcdc': mcdc_scraper,
'prcdf': prcdf_scraper,
'lcdc': lcdc_scraper,
'kcdc': kcdc_scraper,
'tcdc': tcdc_scraper,
'acdc': acdc_scraper,
'ccdc': ccdc_scraper,
'jcj': jcj_scraper,
'hcdc': hcdc_scraper,
'jcadc': jcadc_scraper
}
keynames = ['mcdc', 'prcdf', 'lcdc', 'kcdc', 'tcdc', 'acdc', 'ccdc', 'jcj', 'hcdc', 'jcadc']
jails_str = sys.argv[1]
if jails_str == 'all':
jails = keynames
else:
jails = jails_str.split(',')
if len(sys.argv[1:]) == 2:
nap_length = int(sys.argv[2])
else:
nap_length = 0
for jail in jails[:-1]:
fndict[jail.strip()]()
time.sleep(nap_length)
fndict[jails[-1]]()
if __name__ == '__main__':
main()
| [] | [] | ["jail_scrapers_db", "AIRTABLE_API_KEY", "log_db"] | [] | ["jail_scrapers_db", "AIRTABLE_API_KEY", "log_db"] | python | 3 | 0 |
lib/cros_test_lib.py | #!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cros unit test library, with utility functions."""
from __future__ import print_function
import collections
import contextlib
import cStringIO
import exceptions
import mox
import os
import re
import sys
import unittest
import osutils
import terminal
import cros_build_lib
if 'chromite' not in sys.modules:
# TODO(build): Finish test wrapper (http://crosbug.com/37517).
# Until then, we detect the chromite manipulation not yet having
# occurred, and inject it ourselves.
# We cannot just import chromite since this module is still accessed
# from non chromite.lib.cros_test_lib pathways (which will be resolved
# implicitly via 37517).
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../third_party'))
import mock
Directory = collections.namedtuple('Directory', ['name', 'contents'])
def _FlattenStructure(base_path, dir_struct):
"""Converts a directory structure to a list of paths."""
flattened = []
for obj in dir_struct:
if isinstance(obj, Directory):
new_base = os.path.join(base_path, obj.name).rstrip(os.sep)
flattened.append(new_base + os.sep)
flattened.extend(_FlattenStructure(new_base, obj.contents))
else:
assert(isinstance(obj, basestring))
flattened.append(os.path.join(base_path, obj))
return flattened
def CreateOnDiskHierarchy(base_path, dir_struct):
"""Creates on-disk representation of an in-memory directory structure.
Arguments:
base_path: The absolute root of the directory structure.
dir_struct: A recursively defined data structure that represents a
directory tree. The basic form is a list. Elements can be file names or
cros_test_lib.Directory objects. The 'contents' attribute of Directory
types is a directory structure representing the contents of the directory.
Examples:
- ['file1', 'file2']
- ['file1', Directory('directory', ['deepfile1', 'deepfile2']), 'file2']
"""
flattened = _FlattenStructure(base_path, dir_struct)
for f in flattened:
f = os.path.join(base_path, f)
if f.endswith(os.sep):
os.mkdir(f)
else:
osutils.Touch(f, makedirs=True)
def _VerifyDirectoryIterables(existing, expected):
"""Compare two iterables representing contents of a directory.
Paths in |existing| and |expected| will be compared for exact match.
Arguments:
existing: An iterable containing paths that exist.
expected: An iterable of paths that are expected.
Raises:
AssertionError when there is any divergence between |existing| and
|expected|.
"""
def FormatPaths(paths):
return '\n'.join(sorted(paths))
existing = set(existing)
expected = set(expected)
unexpected = existing - expected
if unexpected:
raise AssertionError('Found unexpected paths:\n%s'
% FormatPaths(unexpected))
missing = expected - existing
if missing:
raise AssertionError('These files were expected but not found:\n%s'
% FormatPaths(missing))
def _DirectoryIterator(base_path):
"""Iterates through the files and subdirs of a directory."""
for root, dirs, files in os.walk(base_path):
for e in [d + os.sep for d in dirs] + files:
yield os.path.join(root, e)
def VerifyOnDiskHierarchy(base_path, dir_struct):
"""Verify that an on-disk directory tree exactly matches a given structure.
Arguments:
See arguments of CreateOnDiskHierarchy()
Raises:
AssertionError when there is any divergence between the on-disk
structure and the structure specified by 'dir_struct'.
"""
expected = _FlattenStructure(base_path, dir_struct)
_VerifyDirectoryIterables(_DirectoryIterator(base_path), expected)
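# Illustrative pairing of the two helpers above (paths and names made up):
#   dir_struct = ['file1', Directory('sub', ['file2'])]
#   CreateOnDiskHierarchy('/tmp/example', dir_struct)
#   VerifyOnDiskHierarchy('/tmp/example', dir_struct)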
def VerifyTarball(tarball, dir_struct):
"""Compare the contents of a tarball against a directory structure.
Arguments:
tarball: Path to the tarball.
dir_struct: See CreateOnDiskHierarchy()
Raises:
AssertionError when there is any divergence between the tarball and the
structure specified by 'dir_struct'.
"""
contents = cros_build_lib.RunCommandCaptureOutput(
['tar', '-tf', tarball]).output.splitlines()
normalized = set()
for p in contents:
norm = os.path.normpath(p)
if p.endswith('/'):
norm += '/'
if norm in normalized:
raise AssertionError('Duplicate entry %r found in %r!' % (norm, tarball))
normalized.add(norm)
expected = _FlattenStructure('', dir_struct)
_VerifyDirectoryIterables(normalized, expected)
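# Helper for StackedSetup below: walks the class MRO and yields each distinct
# setUp/tearDown implementation exactly once. With reverse=False the methods
# are yielded base-class-first (the order wanted for setUp); with reverse=True
# they are yielded most-derived-first (the order wanted for tearDown).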
def _walk_mro_stacking(obj, attr, reverse=False):
iterator = iter if reverse else reversed
methods = (getattr(x, attr, None) for x in iterator(obj.__class__.__mro__))
seen = set()
for x in filter(None, methods):
x = getattr(x, 'im_func', x)
if x not in seen:
seen.add(x)
yield x
def _stacked_setUp(self):
self.__test_was_run__ = False
try:
for target in _walk_mro_stacking(self, '__raw_setUp__'):
target(self)
except:
# TestCase doesn't trigger tearDowns if setUp failed; thus
# manually force it ourselves to ensure cleanup occurs.
_stacked_tearDown(self)
raise
# Now mark the object as fully setUp; this is done so that
# any last minute assertions in tearDown can know if they should
# run or not.
self.__test_was_run__ = True
def _stacked_tearDown(self):
exc_info = None
for target in _walk_mro_stacking(self, '__raw_tearDown__', True):
#pylint: disable=W0702
try:
target(self)
except:
# Preserve the exception, throw it after running
# all tearDowns; we throw just the first also. We suppress
# pylint's warning here since it can't understand that we're
# actually raising the exception, just in a nonstandard way.
if exc_info is None:
exc_info = sys.exc_info()
if exc_info:
# Chuck the saved exception, w/ the same TB from
# when it occurred.
raise exc_info[0], exc_info[1], exc_info[2]
class StackedSetup(type):
"""Metaclass that extracts automatically stacks setUp and tearDown calls.
Basically this exists to make it easier to do setUp *correctly*, while also
suppressing some unittests misbehaviours- for example, the fact that if a
setUp throws an exception the corresponding tearDown isn't ran. This sorts
it.
Usage of it is via usual metaclass approach; just set
`__metaclass__ = StackedSetup` .
Note that because this is a metaclass, rather than just a scope mutator, all
derivative classes inherit it as well; thus all derivative TestCase classes
get automatic stacking."""
def __new__(mcs, name, bases, scope):
if 'setUp' in scope:
scope['__raw_setUp__'] = scope.pop('setUp')
scope['setUp'] = _stacked_setUp
if 'tearDown' in scope:
scope['__raw_tearDown__'] = scope.pop('tearDown')
scope['tearDown'] = _stacked_tearDown
return type.__new__(mcs, name, bases, scope)
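# Minimal usage sketch (illustrative only; the test class and attribute below
# are made up, not part of this module):
#   class MyTest(TestCase):   # TestCase already sets __metaclass__ = StackedSetup
#     def setUp(self):
#       self.state = []       # runs after all base-class setUps
#     def tearDown(self):
#       pass                  # runs before base-class tearDowns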
class EasyAttr(dict):
"""Convenient class for simulating objects with attributes in tests.
An EasyAttr object can be created with any attributes initialized very
easily. Examples:
1) An object with .id=45 and .name="Joe":
testobj = EasyAttr(id=45, name="Joe")
2) An object with .title.text="Big" and .owner.text="Joe":
testobj = EasyAttr(title=EasyAttr(text="Big"), owner=EasyAttr(text="Joe"))
"""
__slots__ = ()
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __delattr__(self, attr):
try:
self.pop(attr)
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
self[attr] = value
def __dir__(self):
return self.keys()
class OutputCapturer(object):
"""Class with limited support for capturing test stdout/stderr output.
Class is designed as a 'ContextManager'. Example usage in a test method
of an object of TestCase:
with self.OutputCapturer() as output:
# Capturing of stdout/stderr automatically starts now.
# Do stuff that sends output to stdout/stderr.
# Capturing automatically stops at end of 'with' block.
# stdout/stderr can be retrieved from the OutputCapturer object:
stdout = output.getStdoutLines() # Or other access methods
# Some Assert methods are only valid if capturing was used in test.
self.AssertOutputContainsError() # Or other related methods
"""
# These work with error output from operation module.
OPER_MSG_SPLIT_RE = re.compile(r'^\033\[1;.*?\033\[0m$|^[^\n]*$',
re.DOTALL | re.MULTILINE)
ERROR_MSG_RE = re.compile(r'^\033\[1;%dm(.+?)(?:\033\[0m)+$' %
(30 + terminal.Color.RED,), re.DOTALL)
WARNING_MSG_RE = re.compile(r'^\033\[1;%dm(.+?)(?:\033\[0m)+$' %
(30 + terminal.Color.YELLOW,), re.DOTALL)
__slots__ = ['_stderr', '_stderr_cap', '_stdout', '_stdout_cap']
def __init__(self):
self._stdout = None
self._stderr = None
self._stdout_cap = None
self._stderr_cap = None
def __enter__(self):
# This method is called with entering 'with' block.
self.StartCapturing()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# This method is called when exiting 'with' block.
self.StopCapturing()
if exc_type:
print('Exception during output capturing: %r' % (exc_val,))
stdout = self.GetStdout()
if stdout:
print('Captured stdout was:\n%s' % stdout)
else:
print('No captured stdout')
stderr = self.GetStderr()
if stderr:
print('Captured stderr was:\n%s' % stderr)
else:
print('No captured stderr')
def StartCapturing(self):
"""Begin capturing stdout and stderr."""
self._stdout = sys.stdout
self._stderr = sys.stderr
sys.stdout = self._stdout_cap = cStringIO.StringIO()
sys.stderr = self._stderr_cap = cStringIO.StringIO()
def StopCapturing(self):
"""Stop capturing stdout and stderr."""
# The only reason stdout or stderr might still have capturing on
# independently of the other is if StartCapturing did not complete.
if self._stdout:
sys.stdout = self._stdout
self._stdout = None
if self._stderr:
sys.stderr = self._stderr
self._stderr = None
def ClearCaptured(self):
# Only valid if capturing is not on.
assert self._stdout is None and self._stderr is None
self._stdout_cap = None
self._stderr_cap = None
def GetStdout(self):
"""Return captured stdout so far."""
return self._stdout_cap.getvalue()
def GetStderr(self):
"""Return captured stderr so far."""
return self._stderr_cap.getvalue()
def _GetOutputLines(self, output, include_empties):
"""Split |output| into lines, optionally |include_empties|.
Return array of lines.
"""
lines = self.OPER_MSG_SPLIT_RE.findall(output)
if not include_empties:
lines = [ln for ln in lines if ln]
return lines
def GetStdoutLines(self, include_empties=True):
"""Return captured stdout so far as array of lines.
If |include_empties| is false filter out all empty lines.
"""
return self._GetOutputLines(self.GetStdout(), include_empties)
def GetStderrLines(self, include_empties=True):
"""Return captured stderr so far as array of lines.
If |include_empties| is false filter out all empty lines.
"""
return self._GetOutputLines(self.GetStderr(), include_empties)
class TestCase(unittest.TestCase):
__metaclass__ = StackedSetup
# List of vars chromite is globally sensitive to and that should
# be suppressed for tests.
ENVIRON_VARIABLE_SUPPRESSIONS = ('CROS_CACHEDIR',)
def __init__(self, *args, **kwds):
unittest.TestCase.__init__(self, *args, **kwds)
# This is set to keep pylint from complaining.
self.__test_was_run__ = False
def setUp(self):
self.__saved_env__ = os.environ.copy()
self.__saved_cwd__ = os.getcwd()
self.__saved_umask__ = os.umask(022)
for x in self.ENVIRON_VARIABLE_SUPPRESSIONS:
os.environ.pop(x, None)
def tearDown(self):
osutils.SetEnvironment(self.__saved_env__)
os.chdir(self.__saved_cwd__)
os.umask(self.__saved_umask__)
def assertRaises2(self, exception, functor, *args, **kwargs):
"""Like assertRaises, just with checking of the excpetion.
args:
exception: The expected exception type to intercept.
functor: The function to invoke.
args: Positional args to pass to the function.
kwargs: Optional args to pass to the function. Note we pull
exact_kls, msg, and check_attrs from these kwargs.
exact_kls: If given, the exception raise must be *exactly* that class
type; derivatives are a failure.
check_attrs: If given, a mapping of attribute -> value to assert on
the resultant exception. Thus if you wanted to catch a ENOENT, you
would do:
assertRaises2(EnvironmentError, func, args,
attrs={"errno":errno.ENOENT})
msg: The error message to be displayed if the exception isn't raised.
If not given, a suitable one is defaulted to.
returns: The exception object.
"""
exact_kls = kwargs.pop("exact_kls", None)
check_attrs = kwargs.pop("check_attrs", {})
msg = kwargs.pop("msg", None)
if msg is None:
msg = ("%s(*%r, **%r) didn't throw an exception"
% (functor.__name__, args, kwargs))
try:
functor(*args, **kwargs)
raise AssertionError(msg)
except exception, e:
if exact_kls:
self.assertEqual(e.__class__, exception)
bad = []
for attr, required in check_attrs.iteritems():
self.assertTrue(hasattr(e, attr),
msg="%s lacks attr %s" % (e, attr))
value = getattr(e, attr)
if value != required:
bad.append("%s attr is %s, needed to be %s"
% (attr, value, required))
if bad:
raise AssertionError("\n".join(bad))
return e
class OutputTestCase(TestCase):
"""Base class for cros unit tests with utility methods."""
def __init__(self, *args, **kwds):
"""Base class __init__ takes a second argument."""
TestCase.__init__(self, *args, **kwds)
self._output_capturer = None
def OutputCapturer(self):
"""Create and return OutputCapturer object."""
self._output_capturer = OutputCapturer()
return self._output_capturer
def _GetOutputCapt(self):
"""Internal access to existing OutputCapturer.
Raises RuntimeError if output capturing was never on.
"""
if self._output_capturer:
return self._output_capturer
raise RuntimeError('Output capturing was never turned on for this test.')
def _GenCheckMsgFunc(self, prefix_re, line_re):
"""Return boolean func to check a line given |prefix_re| and |line_re|."""
def _method(line):
if prefix_re:
# Prefix regexp will strip off prefix (and suffix) from line.
match = prefix_re.search(line)
if match:
line = match.group(1)
else:
return False
return line_re.search(line) if line_re else True
# Provide a description of what this function looks for in a line. Error
# messages can make use of this.
_method.description = None
if prefix_re and line_re:
_method.description = ('line matching prefix regexp %r then regexp %r' %
(prefix_re.pattern, line_re.pattern))
elif prefix_re:
_method.description = 'line matching prefix regexp %r' % prefix_re.pattern
elif line_re:
_method.description = 'line matching regexp %r' % line_re.pattern
else:
raise RuntimeError('Nonsensical usage of _GenCheckMsgFunc: '
'no prefix_re or line_re')
return _method
def _ContainsMsgLine(self, lines, msg_check_func):
return any(msg_check_func(ln) for ln in lines)
def _GenOutputDescription(self, check_stdout, check_stderr):
# Some extra logic to make an error message useful.
if check_stdout and check_stderr:
return 'stdout or stderr'
elif check_stdout:
return 'stdout'
elif check_stderr:
return 'stderr'
def _AssertOutputContainsMsg(self, check_msg_func, invert,
check_stdout, check_stderr):
assert check_stdout or check_stderr
lines = []
if check_stdout:
lines.extend(self._GetOutputCapt().GetStdoutLines())
if check_stderr:
lines.extend(self._GetOutputCapt().GetStderrLines())
result = self._ContainsMsgLine(lines, check_msg_func)
# Some extra logic to make an error message useful.
output_desc = self._GenOutputDescription(check_stdout, check_stderr)
if invert:
msg = ('expected %s to not contain %s,\nbut found it in:\n%s' %
(output_desc, check_msg_func.description, lines))
self.assertFalse(result, msg=msg)
else:
msg = ('expected %s to contain %s,\nbut did not find it in:\n%s' %
(output_desc, check_msg_func.description, lines))
self.assertTrue(result, msg=msg)
def AssertOutputContainsError(self, regexp=None, invert=False,
check_stdout=True, check_stderr=False):
"""Assert requested output contains at least one error line.
If |regexp| is non-null, then the error line must also match it.
If |invert| is true, then assert the line is NOT found.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(OutputCapturer.ERROR_MSG_RE, regexp)
return self._AssertOutputContainsMsg(check_msg_func, invert,
check_stdout, check_stderr)
def AssertOutputContainsWarning(self, regexp=None, invert=False,
check_stdout=True, check_stderr=False):
"""Assert requested output contains at least one warning line.
If |regexp| is non-null, then the warning line must also match it.
If |invert| is true, then assert the line is NOT found.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(OutputCapturer.WARNING_MSG_RE,
regexp)
return self._AssertOutputContainsMsg(check_msg_func, invert,
check_stdout, check_stderr)
def AssertOutputContainsLine(self, regexp, invert=False,
check_stdout=True, check_stderr=False):
"""Assert requested output contains line matching |regexp|.
If |invert| is true, then assert the line is NOT found.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(None, regexp)
return self._AssertOutputContainsMsg(check_msg_func, invert,
check_stdout, check_stderr)
def _AssertOutputEndsInMsg(self, check_msg_func,
check_stdout, check_stderr):
"""Pass if requested output(s) ends(end) with an error message."""
assert check_stdout or check_stderr
lines = []
if check_stdout:
stdout_lines = self._GetOutputCapt().GetStdoutLines(include_empties=False)
if stdout_lines:
lines.append(stdout_lines[-1])
if check_stderr:
stderr_lines = self._GetOutputCapt().GetStderrLines(include_empties=False)
if stderr_lines:
lines.append(stderr_lines[-1])
result = self._ContainsMsgLine(lines, check_msg_func)
# Some extra logic to make an error message useful.
output_desc = self._GenOutputDescription(check_stdout, check_stderr)
msg = ('expected %s to end with %s,\nbut did not find it in:\n%s' %
(output_desc, check_msg_func.description, lines))
self.assertTrue(result, msg=msg)
def AssertOutputEndsInError(self, regexp=None,
check_stdout=True, check_stderr=False):
"""Assert requested output ends in error line.
If |regexp| is non-null, then the error line must also match it.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(OutputCapturer.ERROR_MSG_RE, regexp)
return self._AssertOutputEndsInMsg(check_msg_func,
check_stdout, check_stderr)
def AssertOutputEndsInWarning(self, regexp=None,
check_stdout=True, check_stderr=False):
"""Assert requested output ends in warning line.
If |regexp| is non-null, then the warning line must also match it.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(OutputCapturer.WARNING_MSG_RE,
regexp)
return self._AssertOutputEndsInMsg(check_msg_func,
check_stdout, check_stderr)
def AssertOutputEndsInLine(self, regexp,
check_stdout=True, check_stderr=False):
"""Assert requested output ends in line matching |regexp|.
Raises RuntimeError if output capturing was never on for this test.
"""
check_msg_func = self._GenCheckMsgFunc(None, regexp)
return self._AssertOutputEndsInMsg(check_msg_func,
check_stdout, check_stderr)
def FuncCatchSystemExit(self, func, *args, **kwargs):
"""Run |func| with |args| and |kwargs| and catch exceptions.SystemExit.
Return tuple (return value or None, SystemExit number code or None).
"""
try:
returnval = func(*args, **kwargs)
return returnval, None
except exceptions.SystemExit as ex:
exit_code = ex.args[0]
return None, exit_code
def AssertFuncSystemExitZero(self, func, *args, **kwargs):
"""Run |func| with |args| and |kwargs| catching exceptions.SystemExit.
If the func does not raise a SystemExit with exit code 0 then assert.
"""
exit_code = self.FuncCatchSystemExit(func, *args, **kwargs)[1]
self.assertFalse(exit_code is None,
msg='Expected system exit code 0, but caught none')
self.assertTrue(exit_code == 0,
msg='Expected system exit code 0, but caught %d' %
exit_code)
def AssertFuncSystemExitNonZero(self, func, *args, **kwargs):
"""Run |func| with |args| and |kwargs| catching exceptions.SystemExit.
If the func does not raise a non-zero SystemExit code then assert.
"""
exit_code = self.FuncCatchSystemExit(func, *args, **kwargs)[1]
self.assertFalse(exit_code is None,
msg='Expected non-zero system exit code, but caught none')
self.assertFalse(exit_code == 0,
msg='Expected non-zero system exit code, but caught %d' %
exit_code)
def AssertRaisesAndReturn(self, error, func, *args, **kwargs):
"""Like assertRaises, but return exception raised."""
try:
func(*args, **kwargs)
self.assertTrue(False, msg='Expected %s but got none' % error)
except error as ex:
return ex
class TempDirTestCase(TestCase):
"""Mixin used to give each test a tempdir that is cleansed upon finish"""
sudo_cleanup = False
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.tempdir = None
def setUp(self):
#pylint: disable=W0212
osutils._TempDirSetup(self)
def tearDown(self):
#pylint: disable=W0212
osutils._TempDirTearDown(self, self.sudo_cleanup)
class _RunCommandMock(mox.MockObject):
"""Custom mock class used to suppress arguments we don't care about"""
DEFAULT_IGNORED_ARGS = ('print_cmd',)
def __call__(self, *args, **kwds):
for arg in self.DEFAULT_IGNORED_ARGS:
kwds.setdefault(arg, mox.IgnoreArg())
return mox.MockObject.__call__(self, *args, **kwds)
class LessAnnoyingMox(mox.Mox):
"""Mox derivative that slips in our suppressions to mox.
This is used by default via MoxTestCase; namely, it suppresses awareness of
certain arguments that we don't care about by switching in an overriding
MockObject class (dependent on the namespace requested).
Via this, it makes maintenance much simpler- simplest example, if code
doesn't explicitly assert that print_cmd must be true/false... then
we don't care about what argument is set (it has no effect beyond output).
Mox normally *would* care, making it a pita to maintain. This selectively
suppresses that awareness, making it maintainable.
"""
mock_classes = {}.fromkeys(
['chromite.lib.cros_build_lib.%s' % x
for x in dir(cros_build_lib) if "RunCommand" in x],
_RunCommandMock)
@staticmethod
def _GetNamespace(obj):
return '%s.%s' % (obj.__module__, obj.__name__)
def CreateMock(self, obj, attrs=None):
if attrs is None:
attrs = {}
kls = self.mock_classes.get(
self._GetNamespace(obj), mox.MockObject)
# Copy attrs; I don't trust mox to not be stupid here.
new_mock = kls(obj, attrs=attrs)
self._mock_objects.append(new_mock)
return new_mock
class MoxTestCase(TestCase):
"""Mox based test case; compatible with StackedSetup"""
mox_suppress_verify_all = False
def setUp(self):
self.mox = LessAnnoyingMox()
self.stubs = mox.stubout.StubOutForTesting()
def tearDown(self):
try:
if self.__test_was_run__ and not self.mox_suppress_verify_all:
# This means the test code was actually ran.
# force a verifyall
self.mox.VerifyAll()
finally:
if hasattr(self, 'mox'):
self.mox.UnsetStubs()
if hasattr(self, 'stubs'):
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
class MoxTempDirTestCase(TempDirTestCase, MoxTestCase):
"""Convenience class mixing TempDir and Mox"""
class MoxOutputTestCase(OutputTestCase, MoxTestCase):
"""Conevenience class mixing OutputTestCase and MoxTestCase."""
class MockTestCase(TestCase):
"""Python-mock based test case; compatible with StackedSetup"""
def setUp(self):
self._patchers = []
def tearDown(self):
# We can't just run stopall() by itself, and need to stop our patchers
# manually since stopall() doesn't handle repatching.
cros_build_lib.SafeRun([p.stop for p in reversed(self._patchers)] +
[mock.patch.stopall])
def StartPatcher(self, patcher):
"""Call start() on the patcher, and stop() in tearDown."""
m = patcher.start()
self._patchers.append(patcher)
return m
def PatchObject(self, *args, **kwargs):
"""Create and start a mock.patch.object().
stop() will be called automatically during tearDown.
"""
return self.StartPatcher(mock.patch.object(*args, **kwargs))
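# Illustrative use inside a MockTestCase-derived test:
#   m = self.PatchObject(cros_build_lib, 'RunCommand', return_value=None)
# The patch is reverted automatically by tearDown() above.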
# MockTestCase must be before TempDirTestCase in this inheritance order,
# because MockTestCase.StartPatcher() calls may be for PartialMocks, which
# create their own temporary directory. The teardown for those directories
# occurs during MockTestCase.tearDown(), which needs to be run before
# TempDirTestCase.tearDown().
class MockTempDirTestCase(MockTestCase, TempDirTestCase):
"""Convenience class mixing TempDir and Mock."""
def FindTests(directory, module_namespace=''):
"""Find all *_unittest.py, and return their python namespaces.
Args:
directory: The directory to scan for tests.
module_namespace: What namespace to prefix all found tests with.
Returns:
A list of python unittests in python namespace form.
"""
results = cros_build_lib.RunCommandCaptureOutput(
['find', '.', '-name', '*_unittest.py', '-printf', '%P\n'],
cwd=directory, print_cmd=False).output.splitlines()
# Drop the trailing .py, inject in the name if one was given.
if module_namespace:
module_namespace += '.'
return [module_namespace + x[:-3].replace('/', '.') for x in results]
@contextlib.contextmanager
def DisableLogging():
"""Temporarily disable chromite logging."""
backup = cros_build_lib.logger.disabled
try:
cros_build_lib.logger.disabled = True
yield
finally:
cros_build_lib.logger.disabled = backup
def main(**kwds):
"""Helper wrapper around unittest.main. Invoke this, not unittest.main.
Any passed in kwds are passed directly down to unittest.main; via this, you
can inject custom argv for example (to limit what tests run).
"""
# Default to exit=True; this matches old behaviour, and allows unittest
# to trigger sys.exit on its own. Unfortunately, the exit keyword is only
# available in 2.7- as such, handle it ourselves.
allow_exit = kwds.pop('exit', True)
cros_build_lib.SetupBasicLogging()
try:
unittest.main(**kwds)
raise SystemExit(0)
except SystemExit, e:
if e.__class__ != SystemExit or allow_exit:
raise
# Redo the exit code ourselves- unittest throws True on occasion.
# This is why the lack of typing for SystemExit code attribute makes life
# suck, in parallel to unittest being special.
# Finally, note that it's possible for code to be a string...
if isinstance(e.code, (int, long)):
# This is done since exit code may be something other than 1/0; if they
# explicitly pass it, we'll honor it.
return e.code
return 1 if e.code else 0
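# Illustrative usage (not part of the original module): since all keyword
# arguments are forwarded to unittest.main, a custom argv can limit which tests
# run; the test name below is hypothetical.
#
#   if __name__ == '__main__':
#     main(argv=[sys.argv[0], 'FooTest.testBar'])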
| []
| []
| []
| [] | [] | python | 0 | 0 | |
analysis/project.go | package analysis
import (
"os"
"io"
"log"
"fmt"
"bufio"
"path/filepath"
"strings"
"strconv"
"regexp"
"context"
"time"
"github.com/google/zoekt/contrib"
)
var (
P4_BIN string
GIT_BIN string
CTAGS_BIN string
CYGWIN_BASE_DIR string
CYGWIN_ON bool
)
func init() {
P4_BIN = os.Getenv("ZOEKT_P4_BIN")
GIT_BIN = os.Getenv("ZOEKT_GIT_BIN")
CTAGS_BIN = os.Getenv("ZOEKT_CTAGS_BIN")
CYGWIN_BASE_DIR = os.Getenv("ZOEKT_CYGWIN_BASE_DIR")
CYGWIN_ON = CYGWIN_BASE_DIR != ""
}
// IProject project operator interface
type IProject interface {
GetName() string
GetBaseDir() string
GetMetadataDir() string
Sync() (map[string]string, error) // return filepath to store latest modified file list
Compile() error // virtually compile project; store metadata into disk: dump commit message, build ast tree ...
GetProjectType() string // return p4, git, ...
GetFileTextContents(path, revision string) (string, error)
GetFileBinaryContents(path, revision string) ([]byte, error)
GetFileLength(path, revision string) (int64, error)
GetFileHash(path, revision string) (string, error)
GetFileBlameInfo(path, revision string, startLine, endLine int) ([]*BlameDetails, error)
GetFileCommitInfo(path string, offset, N int) ([]string, error) // N = -1 for dumping all
GetDirContents(path, revision string) ([]string, error)
// commit
GetCommitDetails(commitId string) (*CommitDetails, error)
SearchCommits(ctx context.Context, query string, num int) (*contrib.SearchResult, error)
}
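// Illustrative sketch (not part of the original file): a caller would normally
// obtain an IProject via NewProject (defined below) and drive it through this
// interface; the project name and directory here are hypothetical.
//
//   proj := NewProject("myrepo", "/data/projects/myrepo")
//   if proj != nil {
//       updated, err := proj.Sync()
//       if err == nil {
//           log.Printf("%s: %d files updated", proj.GetName(), len(updated))
//       }
//   }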
type BlameDetails struct {
Author string `json:"author"`
Commit string `json:"commit"`
Timestamp int64 `json:"timestamp"`
}
var _ IProject = &P4Project{}
var _ IProject = &GitProject{}
func ListProjects (baseDir string) ([]string, error) {
list := make([]string, 0)
dir, err := os.Open(baseDir)
if err != nil { return nil, err }
defer dir.Close()
files, err := dir.Readdir(-1)
if err != nil { return nil, err }
for _, file := range files {
if !file.IsDir() { continue }
name := file.Name()
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") {
continue
}
list = append(list, name)
}
return list, nil
}
func cygwinPath (baseDir, cygwinBaseDir string) string {
cygwinBaseDir, err := filepath.Abs(cygwinBaseDir)
if err != nil {
return baseDir
}
if strings.HasPrefix(baseDir, cygwinBaseDir) {
rcbd := []rune(cygwinBaseDir)
rbd := []rune(baseDir)
return strings.Replace(string(rbd[len(rcbd):]), string(filepath.Separator), "/", -1)
}
return baseDir
}
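// Illustrative example (assumed values): with a cygwin base of `C:\cygwin64`,
// cygwinPath(`C:\cygwin64\home\user\repo`, `C:\cygwin64`) returns "/home/user/repo";
// a path outside the cygwin base directory is returned unchanged.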
func NewProject (projectName string, baseDir string) IProject {
baseDir, err := filepath.Abs(baseDir)
if err != nil {
return nil
}
info, err := os.Stat(baseDir)
if err != nil {
return nil
}
options := make(map[string]string)
// git project:
// - .git
gitGuess := filepath.Join(baseDir, ".git")
info, err = os.Stat(gitGuess)
if err == nil {
if !info.IsDir() {
return nil
}
getGitProjectOptions(baseDir, &options)
return NewGitProject(projectName, baseDir, options)
}
// p4 project:
// - .p4
p4Guess := filepath.Join(baseDir, ".p4")
info, err = os.Stat(p4Guess)
if err == nil {
if !info.IsDir() {
return nil
}
getP4ProjectOptions(baseDir, &options)
return NewP4Project(projectName, baseDir, options)
}
// not support yet
return nil
}
var gitRemoteMatcher = regexp.MustCompile(`^origin\s+(.*)\s+\([a-z]+\)$`)
func getGitProjectOptions(baseDir string, options *map[string]string) {
if CYGWIN_ON {
baseDir = cygwinPath(baseDir, CYGWIN_BASE_DIR)
}
cmd := fmt.Sprintf("%s -C %s remote -v", GIT_BIN, baseDir)
contrib.Exec2Lines(cmd, func (line string) {
parts := gitRemoteMatcher.FindStringSubmatch(line)
if parts == nil {
return
}
(*options)["Url"] = parts[1]
})
}
func getP4ProjectOptions(baseDir string, options *map[string]string) {
configFilepath := filepath.Join(baseDir, ".p4", "config")
f, err := os.Open(configFilepath)
if err != nil {
return
}
defer f.Close()
// config file max size is 4KB
buf := make([]byte, 4096)
n, err := f.Read(buf)
if err != nil {
return
}
for _, keyval := range strings.Split(string(buf[0:n]), "\n") {
if keyval == "" {
continue
}
parts := strings.SplitN(keyval, "=", 2)
(*options)[parts[0]] = parts[1]
}
}
// P4Project //////////////////////////////////////////////////////////////////
type P4Project struct {
Name string
BaseDir string
P4Port, P4User, P4Client string
P4Details p4Details
}
type p4Details struct {
Root string
Owner string
Views map[string]string
}
func NewP4Project (projectName string, baseDir string, options map[string]string) *P4Project {
if P4_BIN == "" {
log.Panic("[E] ! cannot find p4 command")
}
// baseDir: absolute path
port, ok := options["P4PORT"]
if !ok {
log.Printf("P/%s: [E] missing P4PORT\n", projectName)
return nil
}
user, ok := options["P4USER"]
if !ok {
log.Printf("P/%s: [E] missing P4USER\n", projectName)
return nil
}
client, ok := options["P4CLIENT"]
if !ok {
log.Printf("P/%s: [E] missing P4CLIENT\n", projectName)
return nil
}
p := &P4Project{projectName, baseDir, port, user, client, p4Details{}};
p.getDetails()
return p
}
func (p *P4Project) GetName () string {
return p.Name
}
func (p *P4Project) GetBaseDir () string {
return p.BaseDir
}
func (p *P4Project) GetMetadataDir () string {
return filepath.Join(p.BaseDir, ".p4")
}
var p4DetailRootMather = regexp.MustCompile(`^Root:\s+(.+)$`)
var p4DetailOwnerMather = regexp.MustCompile(`^Owner:\s+(.+)$`)
var p4DetailViewMather = regexp.MustCompile(`^View:$`)
// TODO: only supports view maps like //depot/path/to/... //client/path/to/...
// does not support //depot/path/to/file //client/path/to/file
// does not support -//depot/path/to/... //client/path/to/file
var p4DetailViewLineMather = regexp.MustCompile(`^\s(//.+/)\.{3}\s+(//.+/)\.{3}$`)
func p4clientLineParse(p *P4Project, line string, viewMapLines *bool, output *os.File) {
// output for cache detail lines
if output != nil {
// XXX: we ignore the write error here?
output.WriteString(line)
output.WriteString("\n")
}
if strings.HasPrefix(line, "#") {
return
}
if *viewMapLines {
viewMap := p4DetailViewLineMather.FindStringSubmatch(line)
if viewMap != nil {
localPath := strings.TrimPrefix(viewMap[2], fmt.Sprintf("//%s/", p.P4Client))
if filepath.Separator == '\\' {
localPath = strings.ReplaceAll(localPath, "/", "\\")
}
localPath = fmt.Sprintf("%s%s", p.P4Details.Root, localPath)
p.P4Details.Views[viewMap[1]] = localPath
}
return
}
parts := p4DetailRootMather.FindStringSubmatch(line)
if parts != nil {
p.P4Details.Root = strings.TrimRight(parts[1], string(filepath.Separator)) + string(filepath.Separator)
return
}
parts = p4DetailOwnerMather.FindStringSubmatch(line)
if parts != nil {
p.P4Details.Owner = parts[1]
return
}
parts = p4DetailViewMather.FindStringSubmatch(line)
if parts != nil {
*viewMapLines = true
p.P4Details.Views = make(map[string]string)
return
}
}
func (p *P4Project) getDetails_cached () error {
targetDir := filepath.Join(p.BaseDir, ".p4", ".zoekt", "cache")
err := contrib.PrepareDirectory(targetDir)
if err != nil {
return err
}
targetPath := filepath.Join(targetDir, "remote")
f, err := os.Open(targetPath)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
viewMapLines := false
for scanner.Scan() {
p4clientLineParse(p, scanner.Text(), &viewMapLines, nil)
}
if err = scanner.Err(); err != nil {
return err
}
return nil
}
func (p *P4Project) getDetails () error {
err := p.getDetails_cached()
if err == nil {
return nil
}
cmd := fmt.Sprintf(
"P4PORT=%s P4USER=%s P4CLIENT=%s %s client -o",
p.P4Port, p.P4User, p.P4Client, P4_BIN,
)
contrib.PrintDebugCommand(cmd)
detailCacheFilename := filepath.Join(p.BaseDir, ".p4", ".zoekt", "cache", "remote")
f, err := os.Create(detailCacheFilename)
if err == nil {
defer f.Close()
} else {
f = nil
}
viewMapLines := false
err = contrib.Exec2Lines(cmd, func (line string) {
p4clientLineParse(p, line, &viewMapLines, f)
})
return err
}
func (p *P4Project) prepareP4folder () error {
p4folder := filepath.Join(p.BaseDir, ".p4")
err := contrib.PrepareDirectory(p4folder)
if err != nil {
return err
}
p4config := filepath.Join(p4folder, "config")
f, err := os.Create(p4config)
if err != nil {
return err
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("P4PORT=%s\nP4USER=%s\nP4CLIENT=%s\n", p.P4Port, p.P4User, p.P4Client))
if err != nil {
return err
}
return nil
}
// p4 output e.g. //depot/b#1 - added as /path/to/b
var p4SyncLineMatcher = regexp.MustCompile(`^(.*)#(\d+) - (\w+) as (.*)$`)
// when we manually remove all files in a client
// and then do a force sync, p4 reports every file as deleted
// and then refreshes them ...
var p4SyncLineRefreshMatcher = regexp.MustCompile(`^(.*)#(\d+) - refreshing (.*)$`)
func (p *P4Project) extractSyncPath(line string, updatedList *map[string]string) {
parts := p4SyncLineMatcher.FindStringSubmatch(line)
if parts != nil {
filename := strings.TrimPrefix(parts[4], p.BaseDir)
(*updatedList)[filename] = parts[3]
return
}
parts = p4SyncLineRefreshMatcher.FindStringSubmatch(line)
if parts != nil {
filename := strings.TrimPrefix(parts[3], p.BaseDir)
(*updatedList)[filename] = "added"
}
}
func (p *P4Project) clone (updatedList *map[string]string) error {
cmd := fmt.Sprintf(
"P4PORT=%s P4USER=%s P4CLIENT=%s %s sync -f",
p.P4Port, p.P4User, p.P4Client, P4_BIN,
)
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, nil)
if err != nil {
return err
}
doWalk(p.BaseDir, ".p4", updatedList)
return p.prepareP4folder()
}
func (p *P4Project) sync (updatedList *map[string]string) error {
cmd := fmt.Sprintf(
"P4PORT=%s P4USER=%s P4CLIENT=%s %s sync",
p.P4Port, p.P4User, p.P4Client, P4_BIN,
)
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, func (line string) {
p.extractSyncPath(line, updatedList)
})
return err
}
func (p *P4Project) Sync () (map[string]string, error) {
updatedList := make(map[string]string)
fileinfo, err := os.Stat(p.BaseDir)
if os.IsNotExist(err) {
err = p.clone(&updatedList)
return updatedList, err
}
if err != nil {
return updatedList, err
}
if !fileinfo.IsDir() {
return updatedList, fmt.Errorf("P/%s: [E] cannot clone repo since \"%s\" is not a directory", p.Name, p.BaseDir)
}
err = p.sync(&updatedList)
return updatedList, err
}
func (p *P4Project) Compile () error {
return nil
}
func (p *P4Project) GetProjectType () string {
return "p4"
}
// P4Project.MapViewPath
// - it is a special func for p4 only; to map a local path to server path
// /client/root/path/to/file --> //depot/path/to/file
func (p *P4Project) MapViewPath (path string) string {
if path == "/" {
for oneViewPath, oneLocalPath := range p.P4Details.Views {
path = strings.TrimSuffix(oneViewPath, strings.TrimPrefix(oneLocalPath, p.BaseDir))
return path
}
return ""
}
fullPath := filepath.Join(p.BaseDir, path)
matchedView := ""
matchedLocal := ""
maxLen := 0
for viewPath, localPath := range p.P4Details.Views {
if strings.HasPrefix(fullPath, localPath) {
n := len(localPath)
if n > maxLen {
maxLen = n
matchedView = viewPath
matchedLocal = localPath
}
} else if fullPath + string(filepath.Separator) == localPath {
return viewPath
}
}
if matchedView == "" {
return ""
}
mappedPath := matchedView + strings.TrimPrefix(fullPath, matchedLocal)
if strings.HasSuffix(path, string(filepath.Separator)) && !strings.HasSuffix(mappedPath, "/") {
mappedPath += "/"
}
return mappedPath
}
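// Illustrative example (assumed view map): with Views = {"//depot/proj/": "/home/u/proj/"}
// and BaseDir = "/home/u/proj", MapViewPath("/src/a.go") returns "//depot/proj/src/a.go",
// and a path outside every mapped view returns "".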
func (p *P4Project) MapLocalPath (serverPath string) string {
matchedView := ""
matchedLocal := ""
maxLen := 0
for viewPath, localPath := range p.P4Details.Views {
if strings.HasPrefix(serverPath, viewPath) {
n := len(viewPath)
if n > maxLen {
maxLen = n
matchedView = viewPath
matchedLocal = localPath
}
}
}
if matchedLocal == "" {
return ""
}
mappedPath := matchedLocal + strings.TrimPrefix(serverPath, matchedView)
mappedPath = strings.TrimPrefix(mappedPath, p.BaseDir)
return mappedPath
}
func (p *P4Project) GetFileTextContents (path, revision string) (string, error) {
B, err := p.GetFileBinaryContents(path, revision)
if err != nil {
return "", err
}
T := string(B)
if strings.Index(T, "\x00") >= 0 {
return "", fmt.Errorf("binary")
}
return T, nil
}
func (p *P4Project) GetFileBinaryContents (path, revision string) ([]byte, error) {
// P4CONFIG=.p4/config p4 print -q /path/to/file#54
url := p.MapViewPath(path)
if url == "" {
return nil, fmt.Errorf("non-tracked file")
}
if revision != "" {
url += "#" + revision
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s print -q %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
var err error
B := make([]byte, 0)
L := 0
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
n := 1024 * 1024 * 1
buf := make([]byte, n)
for n >= 1024 * 1024 * 1 {
L += n
if L > 1024 * 1024 * 10 {
// max reading size 10 MB
err = fmt.Errorf("larger than 10 MB")
return
}
n, err = stream.Read(buf)
if err != nil {
return
}
B = append(B, buf[0:n]...)
}
err = nil
})
if err != nil {
return nil, err
}
return B, nil
}
func (p *P4Project) GetFileHash (path, revision string) (string, error) {
// P4CONFIG=.p4/config p4 print -q /path/to/file#54
var url string
if revision == "" {
url = filepath.Join(p.BaseDir, path)
return contrib.FileHash(url)
} else {
url = p.MapViewPath(path)
if url == "" {
return "", fmt.Errorf("non-tracked file")
}
if revision != "" {
url += "#" + revision
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s print -q %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
var hash string
var err error
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
hash, err = contrib.IoHash(stream)
})
return hash, err
}
}
func (p *P4Project) GetFileLength (path, revision string) (int64, error) {
// P4CONFIG=.p4/config p4 print -q /path/to/file#54
var url string
if revision == "" {
url = filepath.Join(p.BaseDir, path)
return contrib.FileLen(url)
} else {
url = p.MapViewPath(path)
if url == "" {
return -1, fmt.Errorf("non-tracked file")
}
if revision != "" {
url += "#" + revision
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s print -q %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
var L int64
var err error
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
L, err = contrib.IoLen(stream)
})
return L, err
}
}
var p4AnnotateMatcher = regexp.MustCompile(`^(\d+):.*$`)
func (p *P4Project) GetFileBlameInfo (path, revision string, startLine, endLine int) ([]*BlameDetails, error) {
// P4CONFIG=.p4/config p4 annotate -q /path/to/file#54 (rev)
// P4CONFIG=.p4/config p4 annotate -I -q /path/to/file#54 (cln)
// Step 1: get filelog (ChangeNumber-Author map)
url := p.MapViewPath(path)
if url == "" {
return nil, fmt.Errorf("non-tracked file")
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s filelog -s -i %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
cacheAuthor := make(map[string]string, 0)
cacheTimestamp := make(map[string]int64, 0)
contrib.Exec2Lines(cmd, func (line string) {
parts := p4FilelogRevMatcher.FindStringSubmatch(line)
if parts != nil {
cacheAuthor[parts[2]] = parts[5]
// XXX: set p4 server timezone +0000
t, timeErr := time.Parse(
time.RFC3339,
strings.Join(strings.Split(strings.Split(parts[4], " ")[0], "/"), "-") + "T00:00:00Z",
)
if timeErr != nil {
cacheTimestamp[parts[2]] = -1
} else {
cacheTimestamp[parts[2]] = t.Unix()
}
return
}
})
// Step 2: get annotate
if revision != "" {
url += "#" + revision
}
cmd = fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s annotate -q -c -I %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
blames := make([]*BlameDetails, 0)
lastCommit := ""
lineNo := 1
contrib.Exec2Lines(cmd, func (line string) {
parts := p4AnnotateMatcher.FindStringSubmatch(line)
if parts != nil {
if lineNo < startLine || lineNo > endLine {
lineNo ++
return
}
C := parts[1]
author, ok := cacheAuthor[C]
tp, _ := cacheTimestamp[C]
if !ok {
commitDetails, err := p.getCommitSummary(C)
if err == nil {
author = commitDetails.Author
tp = commitDetails.Timestamp
cacheAuthor[commitDetails.Id] = author
cacheTimestamp[commitDetails.Id] = tp
} else {
author = "(unknown)"
}
}
if lastCommit == C {
C = "^"
author = "^"
tp = 0
} else {
lastCommit = C
}
details := &BlameDetails{author, C, tp}
blames = append(blames, details)
lineNo ++
return
}
})
return blames, nil
}
var p4FilelogRevMatcher = regexp.MustCompile(`^\.\.\. #(\d+) change (\d+) ([a-z]+) on (\d{4}/\d{2}/\d{2} by ([^\s]+)@[^\s]+ .*)$`)
var p4FilelogExtraMatcher = regexp.MustCompile(`^\.\.\. \.\.\. ([a-z]+) from (.+)$`)
func (p *P4Project) GetFileCommitInfo (path string, offset, N int) ([]string, error) {
// P4CONFIG=.p4/config p4 filelog -s /path/to/file
/* samples
... #2 change \d+ integrate on YYYY/MM/DD by who@where (text) 'commit message short'
... ... copy from //depot/path/to/file#2
... #1 change \d+ branch on YYYY/MM/DD by who@where (text) 'commit message short'
... ... branch from //depot/path/to/file#1
*/
url := p.MapViewPath(path)
if url == "" {
return nil, fmt.Errorf("non-tracked file")
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s filelog -s %s",
p.BaseDir, P4_BIN, url,
)
contrib.PrintDebugCommand(cmd)
commits := make([]string, 0)
contrib.Exec2Lines(cmd, func (line string) {
parts := p4FilelogRevMatcher.FindStringSubmatch(line)
if parts != nil {
if offset > 0 {
offset --
return
}
if N == 0 {
return
}
commits = append(commits, parts[2])
N --
return
}
parts = p4FilelogExtraMatcher.FindStringSubmatch(line)
// TODO: deal with extra info
})
return commits, nil
}
var p4NoSuchFileMatcher = regexp.MustCompile(`^.* - no such file\(s\)\.$`)
var p4FoundFileMatcher = regexp.MustCompile(`^(.*)#(\d+) - [a-z/]+ change (\d+) .*$`)
func (p *P4Project) GetDirContents (path, revision string) ([]string, error) {
serverPath := p.MapViewPath(path)
var suffix string
if revision != "" {
suffix = "#" + suffix
}
if serverPath == "" {
return nil, fmt.Errorf("path not found")
}
list := make([]string, 0)
if !strings.HasSuffix(serverPath, string(filepath.Separator)) {
serverPath = serverPath + string(filepath.Separator)
}
cmd := fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s files -e %s*%s",
p.BaseDir, P4_BIN, serverPath, suffix,
)
contrib.PrintDebugCommand(cmd)
contrib.Exec2Lines(cmd, func (line string) {
// //depot/path/to/file#4 - delete change 1234 (text)
// //depot/path/to/file#4 - branch change 1234 (text)
// //depot/path/to/file#4 - move/add change 1234 (text)
// //depot/path/to/file#4 - add change 1234 (text)
parts := p4NoSuchFileMatcher.FindStringSubmatch(line)
if parts != nil {
return
}
parts = p4FoundFileMatcher.FindStringSubmatch(line)
if parts == nil {
return
}
checkLocal := p.MapLocalPath(parts[1])
if checkLocal != "" {
list = append(list, filepath.Base(parts[1]))
}
})
cmd = fmt.Sprintf(
"P4CONFIG=%s/.p4/config %s dirs -C %s*%s",
p.BaseDir, P4_BIN, serverPath, suffix,
)
contrib.PrintDebugCommand(cmd)
contrib.Exec2Lines(cmd, func (line string) {
// //depot/path/to/dir
parts := p4NoSuchFileMatcher.FindStringSubmatch(line)
if parts != nil {
return
}
list = append(list, filepath.Base(line) + "/")
})
return list, nil
}
// GitProject /////////////////////////////////////////////////////////////////
type GitProject struct {
Name string
BaseDir string
Url, Branch string
}
func NewGitProject (projectName string, baseDir string, options map[string]string) *GitProject {
if GIT_BIN == "" {
log.Panic("[E] ! cannot find git command")
}
// baseDir: absolute path
url, ok := options["Url"]
if !ok {
log.Printf("P/%s: [E] missing Url\n", projectName)
return nil
}
branch, ok := options["Branch"]
if !ok {
branch = ""
}
p := &GitProject{projectName, baseDir, url, branch};
info, err := os.Stat(baseDir)
if err == nil {
if info.IsDir() {
p.getCurrentBranch()
} else {
log.Printf("P/%s: [E] %s is a file\n", projectName, baseDir)
return nil
}
} else {
log.Printf("P/%s: [W] missing Branch; using default\n", projectName)
}
return p
}
func (p *GitProject) GetName () string {
return p.Name
}
func (p *GitProject) GetBaseDir () string {
return p.BaseDir
}
func (p *GitProject) getCmdBaseDir () string {
if CYGWIN_ON {
return cygwinPath(p.BaseDir, CYGWIN_BASE_DIR)
}
return p.BaseDir
}
func (p *GitProject) GetMetadataDir () string {
return filepath.Join(p.BaseDir, ".git")
}
func (p *GitProject) getCurrentBranch () (string, error) {
cmd := fmt.Sprintf("%s -C %s branch", GIT_BIN, p.getCmdBaseDir())
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, func (line string) {
if strings.HasPrefix(line, "* ") {
p.Branch = strings.Fields(line)[1]
}
})
return p.Branch, err
}
func (p *GitProject) clone (updatedList *map[string]string) error {
cmd := ""
if p.Branch == "" {
cmd = fmt.Sprintf(
"%s clone %s %s",
GIT_BIN, p.Url, p.getCmdBaseDir(),
)
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, nil)
if err != nil {
return err
}
p.getCurrentBranch()
} else {
cmd = fmt.Sprintf(
"%s clone %s -b %s %s",
GIT_BIN, p.Url, p.Branch, p.getCmdBaseDir(),
)
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, nil)
if err != nil {
return err
}
}
doWalk(p.BaseDir, ".git", updatedList)
return nil
}
var gitSyncLineMatcher = regexp.MustCompile(`^diff --git a([/].*) b([/].*)$`)
func (p *GitProject) extractSyncPath(line string, updatedList *map[string]string) {
parts := gitSyncLineMatcher.FindStringSubmatch(line)
if parts == nil {
return
}
a := parts[1]
b := parts[2]
if a == b {
(*updatedList)[b] = "modified"
} else {
// move a to b
(*updatedList)[a] = "deleted"
(*updatedList)[b] = "added"
}
}
func (p *GitProject) sync (updatedList *map[string]string) error {
cmd := fmt.Sprintf(
"%s -C %s fetch --all",
GIT_BIN, p.getCmdBaseDir(),
)
contrib.PrintDebugCommand(cmd)
contrib.Exec2Lines(cmd, nil)
if p.Branch == "" {
p.getCurrentBranch()
}
cmd = fmt.Sprintf(
"%s -C %s diff HEAD \"origin/%s\"",
GIT_BIN, p.getCmdBaseDir(), p.Branch,
)
contrib.PrintDebugCommand(cmd)
err := contrib.Exec2Lines(cmd, func (line string) {
p.extractSyncPath(line, updatedList)
})
for path, val := range *updatedList {
if val != "modified" {
continue
}
_, err := os.Stat(filepath.Join(p.BaseDir, path))
if os.IsNotExist(err) {
(*updatedList)[path] = "added"
}
}
cmd = fmt.Sprintf(
"%s -C %s reset --hard \"origin/%s\"",
GIT_BIN, p.getCmdBaseDir(), p.Branch,
)
contrib.PrintDebugCommand(cmd)
err = contrib.Exec2Lines(cmd, nil)
for path, val := range *updatedList {
if val != "modified" {
continue
}
_, err := os.Stat(filepath.Join(p.BaseDir, path))
if os.IsNotExist(err) {
(*updatedList)[path] = "deleted"
}
}
return err
}
func (p *GitProject) Sync () (map[string]string, error) {
updatedList := make(map[string]string)
fileinfo, err := os.Stat(p.BaseDir)
if os.IsNotExist(err) {
err = p.clone(&updatedList)
return updatedList, err
}
if err != nil {
return updatedList, err
}
if !fileinfo.IsDir() {
return updatedList, fmt.Errorf("P/%s: [E] cannot clone repo since \"%s\" is not a directory", p.Name, p.BaseDir)
}
err = p.sync(&updatedList)
return updatedList, err
}
func (p *GitProject) Compile () error {
return nil
}
func (p *GitProject) GetProjectType () string {
return "git"
}
func (p *GitProject) GetFileTextContents (path, revision string) (string, error) {
B, err := p.GetFileBinaryContents(path, revision)
if err != nil {
return "", err
}
T := string(B)
if strings.Index(T, "\x00") >= 0 {
return "", fmt.Errorf("binary")
}
return T, nil
}
func (p *GitProject) GetFileBinaryContents (path, revision string) ([]byte, error) {
sep := string(filepath.Separator)
if CYGWIN_ON {
sep = "/"
}
url := fmt.Sprintf("%s:%s", revision, strings.TrimLeft(path, sep))
cmd := fmt.Sprintf("%s -C %s show %s", GIT_BIN, p.getCmdBaseDir(), url)
contrib.PrintDebugCommand(cmd)
var err error
B := make([]byte, 0)
L := 0
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
n := 1024 * 1024 * 1
buf := make([]byte, n)
for n >= 1024 * 1024 * 1 {
L += n
if L > 1024 * 1024 * 10 {
// max reading size 10 MB
err = fmt.Errorf("larger than 10 MB")
return
}
n, err = stream.Read(buf)
if err != nil {
return
}
B = append(B, buf[0:n]...)
}
err = nil
})
if err != nil {
return nil, err
}
return B, nil
}
func (p *GitProject) GetFileHash (path, revision string) (string, error) {
var url string
if revision == "" {
url = filepath.Join(p.BaseDir, path)
return contrib.FileHash(url)
} else {
sep := string(filepath.Separator)
if CYGWIN_ON {
sep = "/"
}
url = fmt.Sprintf("%s:%s", revision, strings.TrimLeft(path, sep))
cmd := fmt.Sprintf("%s -C %s show %s", GIT_BIN, p.getCmdBaseDir(), url)
contrib.PrintDebugCommand(cmd)
var hash string
var err error
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
hash, err = contrib.IoHash(stream)
})
return hash, err
}
}
func (p *GitProject) GetFileLength (path, revision string) (int64, error) {
var url string
if revision == "" {
url = filepath.Join(p.BaseDir, path)
return contrib.FileLen(url)
} else {
sep := string(filepath.Separator)
if CYGWIN_ON {
sep = "/"
}
url = fmt.Sprintf("%s:%s", revision, strings.TrimLeft(path, sep))
cmd := fmt.Sprintf("%s -C %s show %s", GIT_BIN, p.getCmdBaseDir(), url)
contrib.PrintDebugCommand(cmd)
var L int64
var err error
contrib.Exec2Bytes(cmd, func (stream io.ReadCloser) {
L, err = contrib.IoLen(stream)
})
return L, err
}
}
var gitBlameLineMatcher = regexp.MustCompile(`^\^?([a-f0-9]+) .*\(<(.*@.*)>\s+(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [+\-]\d{4})\s+\d+\)\s+.*$`)
// Capture groups: 1 = commit hash (a leading '^' marks a boundary commit),
// 2 = author email, 3 = datetime with timezone; the text between the hash and
// '(' (original filename on renames) and the trailing line number / line
// contents are matched but not captured.
var gitBlameMaxLineMatcher = regexp.MustCompile(`fatal: file .* has only (\d+) lines`)
func (p *GitProject) GetFileBlameInfo (path, revision string, startLine, endLine int) ([]*BlameDetails, error) {
var Lrange string
if startLine <= 0 {
startLine = 1
}
if endLine <= 0 {
Lrange = ""
} else {
Lrange = fmt.Sprintf("-L %d,%d", startLine, endLine)
}
cmd := fmt.Sprintf(
"%s -C %s blame -e -l %s %s -- %s",
GIT_BIN, p.getCmdBaseDir(), Lrange, revision, filepath.Join(p.getCmdBaseDir(), path),
)
contrib.PrintDebugCommand(cmd)
blames := make([]*BlameDetails, 0)
lastCommit := ""
contrib.Exec2Lines(cmd, func (line string) {
parts := gitBlameMaxLineMatcher.FindStringSubmatch(line)
if parts != nil {
max, err := strconv.Atoi(parts[1])
if err != nil {
return
}
blames, err = p.GetFileBlameInfo(path, revision, startLine, max)
return
}
parts = gitBlameLineMatcher.FindStringSubmatch(line)
var email string
var commit string
var tp int64
if parts == nil {
email = "(unknown)"
commit = "(unknown)"
tp = 0
} else {
email = parts[2]
commit = parts[1]
datetime_parts := strings.Split(parts[3], " ")
timezone_runes := []rune(datetime_parts[2])
t, timeErr := time.Parse(
time.RFC3339,
fmt.Sprintf(
"%sT%s%s:%s",
datetime_parts[0],
datetime_parts[1],
string(timezone_runes[0:3]),
string(timezone_runes[3:5]),
),
)
if timeErr != nil {
tp = -1
} else {
tp = t.Unix()
}
}
if commit == lastCommit {
// to shorten blame emails for lines
email = "^"
commit = "^"
tp = 0
} else {
lastCommit = commit
}
details := &BlameDetails{email, commit, tp}
blames = append(blames, details)
})
return blames, nil
}
func (p *GitProject) GetFileCommitInfo (path string, offset, N int) ([]string, error) {
cmd := fmt.Sprintf(
"%s -C %s log --pretty=format:%%H -- %s",
GIT_BIN, p.getCmdBaseDir(), filepath.Join(p.getCmdBaseDir(), path),
)
contrib.PrintDebugCommand(cmd)
commits := make([]string, 0)
contrib.Exec2Lines(cmd, func (line string) {
if line == "" {
return
}
if offset > 0 {
offset --
return
}
if N == 0 {
// if N = -1, dump all commit hashes
return
}
commits = append(commits, line)
N --
})
return commits, nil
}
func (p *GitProject) GetDirContents (path, revision string) ([]string, error) {
path = filepath.Join(p.BaseDir, path)
if !strings.HasSuffix(path, string(filepath.Separator)) {
path += string(filepath.Separator)
}
list := make([]string, 0)
if p.Branch == "" {
p.getCurrentBranch()
}
if revision == "" {
revision = p.Branch
}
cmdPath := path
if CYGWIN_ON {
cmdPath = cygwinPath(path, CYGWIN_BASE_DIR)
}
cmd := fmt.Sprintf(
"%s -C %s ls-tree --name-only %s -- %s",
GIT_BIN, p.getCmdBaseDir(), revision, cmdPath,
)
contrib.PrintDebugCommand(cmd)
contrib.Exec2Lines(cmd, func (line string) {
if line == "" {
return
}
fullPath := filepath.Join(p.BaseDir, line)
info, err := os.Stat(fullPath)
prefix := strings.TrimPrefix(path, p.BaseDir)
line = "/" + line
if err == nil {
// XXX: fix for windows? line.replaceAll("\\", "/")
if info.IsDir() {
line = line + "/"
list = append(list, strings.TrimPrefix(line, prefix))
return
}
}
list = append(list, strings.TrimPrefix(line, prefix))
})
return list, nil
}
func doWalk (baseDir string, ignoredDir string, updatedList *map[string]string) error {
return filepath.Walk(baseDir, func (path string, info os.FileInfo, err error) error {
if err != nil {
log.Printf("D/%s: [analysis.doWalk/W] cannot get file list ...\n", baseDir)
return err
}
if info.IsDir() {
if info.Name() == ignoredDir {
return filepath.SkipDir
}
} else {
(*updatedList)[strings.TrimPrefix(path, baseDir)] = "added"
}
return nil
})
}
| [
"\"ZOEKT_P4_BIN\"",
"\"ZOEKT_GIT_BIN\"",
"\"ZOEKT_CTAGS_BIN\"",
"\"ZOEKT_CYGWIN_BASE_DIR\""
]
| []
| [
"ZOEKT_CYGWIN_BASE_DIR",
"ZOEKT_GIT_BIN",
"ZOEKT_P4_BIN",
"ZOEKT_CTAGS_BIN"
]
| [] | ["ZOEKT_CYGWIN_BASE_DIR", "ZOEKT_GIT_BIN", "ZOEKT_P4_BIN", "ZOEKT_CTAGS_BIN"] | go | 4 | 0 | |
rest-api/app.py | import json
import os
from flask import Flask, request, Response
from Tracker import Tracker
app = Flask(__name__)
tracker = Tracker()
@app.route('/track/<name>', methods=['POST'])
def track(name):
data = request.get_json()
tracker.update(name, data)
return json.dumps({"status": "Ok"})
@app.route('/metrics')
def metrics():
return tracker.get_metrics()
app.run(host='0.0.0.0', debug=True, port=int(os.environ.get("REST_API_PORT", "5000")))
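# Illustrative usage (assumes the service is reachable on the default port
# 5000; the "jobs" tracker name and JSON payload are hypothetical):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"value": 1}' http://localhost:5000/track/jobs
#   curl http://localhost:5000/metrics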
| []
| []
| [
"REST_API_PORT"
]
| [] | ["REST_API_PORT"] | python | 1 | 0 | |
lib/cgi_tweaked.py | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
import sys
import os
import urllib.parse
from email.parser import FeedParser
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = b""
if 'boundary' in pdict:
boundary = pdict['boundary'].encode("utf-8")
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = b"--" + boundary
lastpart = b"--" + boundary + b"--"
partdict = {}
terminator = b""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = b""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith(b"--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == b"\r\n":
line = line[:-2]
elif line[-1:] == b"\n":
line = line[:-1]
lines[-1] = line
data = b"".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
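# Illustrative usage (not part of the original module): for a multipart POST,
# a CGI script could parse the boundary from the content-type header and then
# read the raw body from sys.stdin.buffer:
#   ctype, pdict = parse_header(os.environ['CONTENT_TYPE'])
#   fields = parse_multipart(sys.stdin.buffer, pdict)
# where each value in fields is a list of bytes objects.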
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
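# Illustrative example (not part of the original module):
#   parse_header('text/html; charset=utf-8')
# returns ('text/html', {'charset': 'utf-8'}).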
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
bytes* ; None if the data is stored a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
Can be :
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
self.fp = fp
self.encoding = encoding
self.errors = errors
self.headers = headers
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
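# Illustrative usage sketch (not part of the original module); the field names
# are hypothetical:
#   form = FieldStorage()
#   name = form.getfirst("name", "")
#   tags = form.getlist("tag")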
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors)
for key, value in query:
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# first line holds boundary ; ignore it, or check that
# b"--" + ib == first_line.strip() ?
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing,self.limit-self.bytes_read,
self.encoding, self.errors)
self.bytes_read += part.bytes_read
self.list.append(part)
if self.bytes_read >= self.length:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The file is opened in binary mode for files, in text mode
for other fields
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
encoding=self.encoding, newline = '\n')
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except os.error as msg:
print("os.error:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
PendingDeprecationWarning, stacklevel=2)
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def valid_boundary(s, _vb_pattern=None):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
newsblur_web/docker_local_settings.py | import logging
import os
# ===================
# = Server Settings =
# ===================
ADMINS = (
('Samuel Clay', '[email protected]'),
)
SERVER_EMAIL = '[email protected]'
HELLO_EMAIL = '[email protected]'
NEWSBLUR_URL = 'https://localhost'
SESSION_COOKIE_DOMAIN = 'localhost'
# ===================
# = Global Settings =
# ===================
DOCKERBUILD = True
DEBUG = False
DEBUG = True
DEBUG_ASSETS = True
DEBUG_QUERIES = True
DEBUG_QUERIES_SUMMARY_ONLY = True
MEDIA_URL = '/media/'
IMAGES_URL = '/imageproxy'
SECRET_KEY = 'YOUR SECRET KEY'
AUTO_PREMIUM_NEW_USERS = True
AUTO_ENABLE_NEW_USERS = True
ENFORCE_SIGNUP_CAPTCHA = False
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'db_redis:6579',
'OPTIONS': {
'DB': 6,
'PARSER_CLASS': 'redis.connection.HiredisParser'
},
},
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Set this to the username that is shown on the homepage to unauthenticated users.
HOMEPAGE_USERNAME = 'popular'
# Google Reader OAuth API Keys
OAUTH_KEY = 'www.example.com'
OAUTH_SECRET = 'SECRET_KEY_FROM_GOOGLE'
S3_ACCESS_KEY = 'XXX'
S3_SECRET = 'SECRET'
S3_BACKUP_BUCKET = 'newsblur_backups'
S3_PAGES_BUCKET_NAME = 'pages-XXX.newsblur.com'
S3_ICONS_BUCKET_NAME = 'icons-XXX.newsblur.com'
STRIPE_SECRET = "YOUR-SECRET-API-KEY"
STRIPE_PUBLISHABLE = "YOUR-PUBLISHABLE-API-KEY"
# ===============
# = Social APIs =
# ===============
FACEBOOK_APP_ID = '111111111111111'
FACEBOOK_SECRET = '99999999999999999999999999999999'
TWITTER_CONSUMER_KEY = 'ooooooooooooooooooooo'
TWITTER_CONSUMER_SECRET = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
YOUTUBE_API_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# =============
# = Databases =
# =============
DATABASES = {
'default': {
'NAME': 'newsblur',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
'USER': 'newsblur',
'PASSWORD': 'newsblur',
'HOST': 'db_postgres',
'PORT': 5432
},
}
MONGO_DB = {
'name': 'newsblur',
'host': 'db_mongo:29019'
}
MONGO_ANALYTICS_DB = {
'name': 'nbanalytics',
'host': 'db_mongo:29019',
}
MONGODB_SLAVE = {
'host': 'db_mongo'
}
# Celery RabbitMQ/Redis Broker
BROKER_URL = "redis://db_redis:6579/0"
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_WORKER_CONCURRENCY = 1
REDIS_USER = {
'host': 'db_redis',
'port': 6579
}
REDIS_PUBSUB = {
'host': 'db_redis',
'port': 6579
}
REDIS_STORY = {
'host': 'db_redis',
'port': 6579
}
REDIS_SESSIONS = {
'host': 'db_redis',
'port': 6579
}
CELERY_REDIS_DB_NUM = 4
SESSION_REDIS_DB = 5
ELASTICSEARCH_FEED_HOSTS = ["db_elasticsearch:9200"]
ELASTICSEARCH_STORY_HOSTS = ["db_elasticsearch:9200"]
ELASTICSEARCH_FEED_HOST = "http://db_elasticsearch:9200"
ELASTICSEARCH_STORY_HOST = "http://db_elasticsearch:9200"
BACKED_BY_AWS = {
'pages_on_node': False,
'pages_on_s3': False,
'icons_on_s3': False,
}
# ===========
# = Logging =
# ===========
# Logging (setup for development)
LOG_TO_STREAM = True
if len(logging._handlerList) < 1:
LOG_FILE = '~/newsblur/logs/development.log'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)-12s: %(message)s',
datefmt='%b %d %H:%M:%S',
handler=logging.StreamHandler)
S3_ACCESS_KEY = '000000000000000000000'
S3_SECRET = '000000000000000000000000/0000000000000000'
S3_BACKUP_BUCKET = 'newsblur_backups'
S3_PAGES_BUCKET_NAME = 'pages-dev.newsblur.com'
S3_ICONS_BUCKET_NAME = 'icons-dev.newsblur.com'
S3_AVATARS_BUCKET_NAME = 'avatars-dev.newsblur.com'
MAILGUN_ACCESS_KEY = 'key-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
MAILGUN_SERVER_NAME = 'newsblur.com'
DO_TOKEN_LOG = '0000000000000000000000000000000000000000000000000000000000000000'
DO_TOKEN_FABRIC = '0000000000000000000000000000000000000000000000000000000000000000'
SERVER_NAME = "nblocalhost"
NEWSBLUR_URL = os.getenv("NEWSBLUR_URL", "https://localhost")
if NEWSBLUR_URL == 'https://localhost':
SESSION_COOKIE_DOMAIN = "localhost"
SESSION_ENGINE = 'redis_sessions.session'
# CORS_ORIGIN_REGEX_WHITELIST = ('^(https?://)?(\w+\.)?nb.local\.com$', )
RECAPTCHA_SECRET_KEY = "0000000000000000000000000000000000000000"
IMAGES_SECRET_KEY = "0000000000000000000000000000000"
| []
| []
| [
"NEWSBLUR_URL"
]
| [] | ["NEWSBLUR_URL"] | python | 1 | 0 | |
src/python/tests/core/google_cloud_utils/gsutil_test.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gsutil."""
import os
import mock
from google_cloud_utils import gsutil
from pyfakefs import fake_filesystem_unittest
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class GSUtilRunnerTest(fake_filesystem_unittest.TestCase):
"""GSUtilRunner tests."""
def setUp(self):
test_helpers.patch_environ(self)
test_helpers.patch(self, ["system.new_process.ProcessRunner.run_and_wait"])
test_utils.set_up_pyfakefs(self)
self.gsutil_runner_obj = gsutil.GSUtilRunner()
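    # run_and_wait is mocked and the filesystem is faked, so these tests never
    # shell out to gsutil or touch real storage.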
def test_rsync_remote_gcs_1(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync("gs://source_bucket/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=18000,
env=mock.ANY,
)
def test_rsync_local_gcs_1(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync("gs://source_bucket/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=18000,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_2(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_2(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_3(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=18000,
env=mock.ANY,
)
def test_rsync_local_gcs_3(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=18000,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_4(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_4(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_5(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
exclusion_pattern='"*.txt$"',
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-x",
'"*.txt$"',
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_5(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
exclusion_pattern='"*.txt$"',
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-x",
'"*.txt$"',
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_download_file_remote_gcs_1(self):
"""Test download_file."""
self.gsutil_runner_obj.download_file("gs://source_bucket/source_path",
"/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "gs://source_bucket/source_path", "/target_path"],
timeout=None,
env=mock.ANY,
)
def test_download_file_local_gcs_1(self):
"""Test download_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.gsutil_runner_obj.download_file("gs://source_bucket/source_path",
"/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/local/source_bucket/objects/source_path", "/target_path"],
timeout=None,
env=mock.ANY,
)
def test_download_file_remote_gcs_2(self):
"""Test download_file."""
self.gsutil_runner_obj.download_file(
"gs://source_bucket/source_path", "/target_path", timeout=1337)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "gs://source_bucket/source_path", "/target_path"],
timeout=1337,
env=mock.ANY,
)
def test_download_file_local_gcs_2(self):
"""Test download_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.gsutil_runner_obj.download_file(
"gs://source_bucket/source_path", "/target_path", timeout=1337)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/local/source_bucket/objects/source_path", "/target_path"],
timeout=1337,
env=mock.ANY,
)
def test_upload_file_remote_gcs_1(self):
"""Test upload_file."""
self.gsutil_runner_obj.upload_file("/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/source_path", "gs://target_bucket/target_path"],
timeout=None,
env=mock.ANY,
)
def test_upload_file_local_gcs_1(self):
"""Test upload_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_file("/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/source_path", "/local/target_bucket/objects/target_path"],
timeout=None,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_file_remote_gcs_2(self):
"""Test upload_file."""
self.gsutil_runner_obj.upload_file(
"/source_path",
"gs://target_bucket/target_path",
timeout=1337,
gzip=True,
metadata={"a": "b"},
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-h", "a:b", "cp", "-Z", "/source_path",
"gs://target_bucket/target_path"
],
timeout=1337,
env=mock.ANY,
)
def test_upload_file_local_gcs_2(self):
"""Test upload_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_file(
"/source_path",
"gs://target_bucket/target_path",
timeout=1337,
gzip=True,
metadata={"a": "b"},
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-h",
"a:b",
"cp",
"-Z",
"/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_files_to_url_remote_gcs_1(self):
"""Test upload_files_to_url."""
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"], "gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "gs://target_bucket/target_path"],
input_data="/source_path1\n/source_path2",
timeout=None,
env=mock.ANY,
)
def test_upload_files_local_gcs_1(self):
"""Test upload_files_to_url."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"], "gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "/local/target_bucket/objects/target_path"],
input_data="/source_path1\n/source_path2",
timeout=None,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_files_remote_gcs_2(self):
"""Test upload_files_to_url."""
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"],
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "gs://target_bucket/target_path"],
input_data="/source_path1\n/source_path2",
timeout=1337,
env=mock.ANY,
)
def test_upload_files_to_url_local_gcs_2(self):
"""Test upload_files_to_url."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"],
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "/local/target_bucket/objects/target_path"],
input_data="/source_path1\n/source_path2",
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
| []
| []
| [
"LOCAL_GCS_BUCKETS_PATH"
]
| [] | ["LOCAL_GCS_BUCKETS_PATH"] | python | 1 | 0 | |
kodland/wsgi.py | """
WSGI config for kodland project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kodland.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/standalone/standalone.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package standalone
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"path"
path_filepath "path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/docker/docker/client"
"github.com/fatih/color"
"github.com/briandowns/spinner"
"github.com/dapr/cli/pkg/print"
"github.com/dapr/cli/utils"
)
const (
daprGitHubOrg = "dapr"
daprGitHubRepo = "dapr"
daprDockerImageName = "daprio/dapr"
daprRuntimeFilePrefix = "daprd"
daprWindowsOS = "windows"
daprLatestVersion = "latest"
daprDefaultLinuxAndMacInstallPath = "/usr/local/bin"
daprDefaultWindowsInstallPath = "c:\\dapr"
// DaprPlacementContainerName is the container name of placement service
DaprPlacementContainerName = "dapr_placement"
// DaprRedisContainerName is the container name of redis
DaprRedisContainerName = "dapr_redis"
)
// Init installs Dapr on a local machine using the supplied runtimeVersion.
func Init(runtimeVersion string, dockerNetwork string, installLocation string) error {
dockerInstalled := isDockerInstalled()
if !dockerInstalled {
return errors.New("could not connect to Docker. Docker may not be installed or running")
}
dir, err := getDaprDir()
if err != nil {
return err
}
var wg sync.WaitGroup
errorChan := make(chan error)
initSteps := []func(*sync.WaitGroup, chan<- error, string, string, string, string){}
initSteps = append(initSteps, installDaprBinary, runPlacementService, runRedis)
wg.Add(len(initSteps))
msg := "Downloading binaries and setting up components..."
var s *spinner.Spinner
if runtime.GOOS == daprWindowsOS {
print.InfoStatusEvent(os.Stdout, msg)
} else {
s = spinner.New(spinner.CharSets[0], 100*time.Millisecond)
s.Writer = os.Stdout
s.Color("cyan")
s.Suffix = fmt.Sprintf(" %s", msg)
s.Start()
}
for _, step := range initSteps {
go step(&wg, errorChan, dir, runtimeVersion, dockerNetwork, installLocation)
}
go func() {
wg.Wait()
close(errorChan)
}()
for err := range errorChan {
if err != nil {
if s != nil {
s.Stop()
}
return err
}
}
if s != nil {
s.Stop()
err = confirmContainerIsRunning(DaprRedisContainerName)
if err != nil {
return err
}
print.SuccessStatusEvent(os.Stdout, msg)
}
return nil
}
func isDockerInstalled() bool {
cli, err := client.NewEnvClient()
if err != nil {
return false
}
_, err = cli.Ping(context.Background())
return err == nil
}
func getDaprDir() (string, error) {
p := ""
if runtime.GOOS == daprWindowsOS {
p = path_filepath.FromSlash("c:/dapr")
} else {
usr, err := user.Current()
if err != nil {
return "", err
}
p = path.Join(usr.HomeDir, ".dapr")
}
err := os.MkdirAll(p, 0700)
if err != nil {
return "", err
}
return p, nil
}
// installLocation is not used, but it is present because it's required to fit the initSteps func above.
// If the number of args increases more, we may consider passing in a struct instead of individual args.
func runRedis(wg *sync.WaitGroup, errorChan chan<- error, dir, version string, dockerNetwork string, installLocation string) {
defer wg.Done()
args := []string{
"run",
"--name", utils.CreateContainerName(DaprRedisContainerName, dockerNetwork),
"--restart", "always",
"-d",
}
if dockerNetwork != "" {
args = append(
args,
"--network", dockerNetwork,
"--network-alias", DaprRedisContainerName)
} else {
args = append(
args,
"-p", "6379:6379")
}
args = append(args, "redis")
_, err := utils.RunCmdAndWait("docker", args...)
if err != nil {
runError := isContainerRunError(err)
if !runError {
errorChan <- parseDockerError("Redis state store", err)
} else {
errorChan <- fmt.Errorf("docker %s failed with: %v", args, err)
}
return
}
errorChan <- nil
}
func confirmContainerIsRunning(containerName string) error {
// e.g. docker ps --filter name=dapr_redis --filter status=running --format {{.Names}}
args := []string{"ps", "--filter", "name=" + containerName, "--filter", "status=running", "--format", "{{.Names}}"}
response, err := utils.RunCmdAndWait("docker", args...)
response = strings.TrimSuffix(response, "\n")
// If 'docker ps' failed due to some reason
if err != nil {
return fmt.Errorf("unable to confirm whether %s is running. error\n%v", containerName, err.Error())
}
// 'docker ps' worked fine, but the response did not have the container name
if response == "" || response != containerName {
return fmt.Errorf("container %s is not running", containerName)
}
return nil
}
func parseDockerError(component string, err error) error {
if exitError, ok := err.(*exec.ExitError); ok {
exitCode := exitError.ExitCode()
if exitCode == 125 { //see https://github.com/moby/moby/pull/14012
return fmt.Errorf("failed to launch %s. Is it already running?", component)
}
if exitCode == 127 {
return fmt.Errorf("failed to launch %s. Make sure Docker is installed and running", component)
}
}
return err
}
func isContainerRunError(err error) bool {
if exitError, ok := err.(*exec.ExitError); ok {
exitCode := exitError.ExitCode()
return exitCode == 125
}
return false
}
func runPlacementService(wg *sync.WaitGroup, errorChan chan<- error, dir, version string, dockerNetwork string, installLocation string) {
defer wg.Done()
image := fmt.Sprintf("%s:%s", daprDockerImageName, version)
// Use only image for latest version
if version == daprLatestVersion {
image = daprDockerImageName
}
args := []string{
"run",
"--name", utils.CreateContainerName(DaprPlacementContainerName, dockerNetwork),
"--restart", "always",
"-d",
"--entrypoint", "./placement",
}
if dockerNetwork != "" {
args = append(args,
"--network", dockerNetwork,
"--network-alias", DaprPlacementContainerName)
} else {
osPort := 50005
if runtime.GOOS == daprWindowsOS {
osPort = 6050
}
args = append(args,
"-p", fmt.Sprintf("%v:50005", osPort))
}
args = append(args, image)
_, err := utils.RunCmdAndWait("docker", args...)
if err != nil {
runError := isContainerRunError(err)
if !runError {
errorChan <- parseDockerError("placement service", err)
} else {
errorChan <- fmt.Errorf("docker %s failed with: %v", args, err)
}
return
}
errorChan <- nil
}
func installDaprBinary(wg *sync.WaitGroup, errorChan chan<- error, dir, version string, dockerNetwork string, installLocation string) {
defer wg.Done()
archiveExt := "tar.gz"
if runtime.GOOS == daprWindowsOS {
archiveExt = "zip"
}
if version == daprLatestVersion {
var err error
version, err = getLatestRelease(daprGitHubOrg, daprGitHubRepo)
if err != nil {
errorChan <- fmt.Errorf("cannot get the latest release version: %s", err)
return
}
version = version[1:]
}
daprURL := fmt.Sprintf(
"https://github.com/%s/%s/releases/download/v%s/%s_%s_%s.%s",
daprGitHubOrg,
daprGitHubRepo,
version,
daprRuntimeFilePrefix,
runtime.GOOS,
runtime.GOARCH,
archiveExt)
filepath, err := downloadFile(dir, daprURL)
if err != nil {
errorChan <- fmt.Errorf("error downloading Dapr binary: %s", err)
return
}
extractedFilePath := ""
if archiveExt == "zip" {
extractedFilePath, err = unzip(filepath, dir)
} else {
extractedFilePath, err = untar(filepath, dir)
}
if err != nil {
errorChan <- fmt.Errorf("error extracting Dapr binary: %s", err)
return
}
daprPath, err := moveFileToPath(extractedFilePath, installLocation)
if err != nil {
errorChan <- fmt.Errorf("error moving Dapr binary to path: %s", err)
return
}
err = makeExecutable(daprPath)
if err != nil {
errorChan <- fmt.Errorf("error making Dapr binary executable: %s", err)
return
}
errorChan <- nil
}
func makeExecutable(filepath string) error {
if runtime.GOOS != daprWindowsOS {
err := os.Chmod(filepath, 0777)
if err != nil {
return err
}
}
return nil
}
func unzip(filepath, targetDir string) (string, error) {
zipReader, err := zip.OpenReader(filepath)
if err != nil {
return "", err
}
if len(zipReader.Reader.File) > 0 {
file := zipReader.Reader.File[0]
zippedFile, err := file.Open()
if err != nil {
return "", err
}
defer zippedFile.Close()
extractedFilePath := path.Join(
targetDir,
file.Name,
)
outputFile, err := os.OpenFile(
extractedFilePath,
os.O_WRONLY|os.O_CREATE|os.O_TRUNC,
file.Mode(),
)
if err != nil {
return "", err
}
defer outputFile.Close()
_, err = io.Copy(outputFile, zippedFile)
if err != nil {
return "", err
}
return extractedFilePath, nil
}
return "", nil
}
func untar(filepath, targetDir string) (string, error) {
tarFile, err := os.Open(filepath)
if err != nil {
return "", err
}
defer tarFile.Close()
gzr, err := gzip.NewReader(tarFile)
if err != nil {
return "", err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
switch {
case err == io.EOF:
return "", fmt.Errorf("file is empty")
case err != nil:
return "", err
case header == nil:
continue
}
extractedFilePath := path.Join(targetDir, header.Name)
switch header.Typeflag {
case tar.TypeReg:
// Extract only daprd
if header.Name != "daprd" {
continue
}
f, err := os.OpenFile(extractedFilePath, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
return "", err
}
if _, err := io.Copy(f, tr); err != nil {
return "", err
}
f.Close()
return extractedFilePath, nil
}
}
}
func moveFileToPath(filepath string, installLocation string) (string, error) {
destDir := daprDefaultLinuxAndMacInstallPath
if runtime.GOOS == daprWindowsOS {
destDir = daprDefaultWindowsInstallPath
filepath = strings.Replace(filepath, "/", "\\", -1)
}
fileName := path_filepath.Base(filepath)
destFilePath := ""
// if user specified --install-path, use that
if installLocation != "" {
destDir = installLocation
}
destFilePath = path.Join(destDir, fileName)
input, err := ioutil.ReadFile(filepath)
if err != nil {
return "", err
}
fmt.Printf("Installing Dapr to %s\n", destDir)
err = utils.CreateDirectory(destDir)
if err != nil {
return "", err
}
if err = ioutil.WriteFile(destFilePath, input, 0644); err != nil {
if runtime.GOOS != daprWindowsOS && strings.Contains(err.Error(), "permission denied") {
err = errors.New(err.Error() + " - please run with sudo")
}
return "", err
}
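	// On Windows, persistently add the install directory to the user's PATH via SETX if it is not already present.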
if runtime.GOOS == daprWindowsOS {
p := os.Getenv("PATH")
if !strings.Contains(strings.ToLower(p), strings.ToLower(destDir)) {
_, err := utils.RunCmdAndWait("SETX", "PATH", p+fmt.Sprintf(";%s", destDir))
if err != nil {
return "", err
}
}
return fmt.Sprintf("%s\\daprd.exe", destDir), nil
}
if installLocation != "" {
color.Set(color.FgYellow)
fmt.Printf("\nDapr installed to %s, please run the following to add it to your path:\n", destDir)
fmt.Printf(" export PATH=$PATH:%s\n", destDir)
color.Unset()
}
return destFilePath, nil
}
type githubRepoReleaseItem struct {
URL string `json:"url"`
TagName string `json:"tag_name"`
Name string `json:"name"`
Draft bool `json:"draft"`
}
// nolint:gosec
func getLatestRelease(gitHubOrg, gitHubRepo string) (string, error) {
releaseURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases", gitHubOrg, gitHubRepo)
resp, err := http.Get(releaseURL)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return "", fmt.Errorf("%s - %s", releaseURL, resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
var githubRepoReleases []githubRepoReleaseItem
err = json.Unmarshal(body, &githubRepoReleases)
if err != nil {
return "", err
}
if len(githubRepoReleases) == 0 {
return "", fmt.Errorf("no releases")
}
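	// Skip release-candidate ("-rc") tags and return the first remaining tag.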
for _, release := range githubRepoReleases {
if !strings.Contains(release.TagName, "-rc") {
return release.TagName, nil
}
}
return "", fmt.Errorf("no releases")
}
// nolint:gosec
func downloadFile(dir string, url string) (string, error) {
tokens := strings.Split(url, "/")
fileName := tokens[len(tokens)-1]
filepath := path.Join(dir, fileName)
	// os.Stat returns a nil error when the file already exists; reuse it instead of downloading again.
	_, err := os.Stat(filepath)
	if err == nil {
		return filepath, nil
	}
resp, err := http.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode == 404 {
return "", errors.New("runtime version not found")
} else if resp.StatusCode != 200 {
return "", fmt.Errorf("download failed with %d", resp.StatusCode)
}
out, err := os.Create(filepath)
if err != nil {
return "", err
}
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
return "", err
}
return filepath, nil
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
libunix/libunix_test.go | package libunix
import (
"os"
"runtime"
"testing"
)
func TestCurrentUser(t *testing.T) {
var userEnv string
if runtime.GOOS == "windows" {
userEnv = os.Getenv("USERNAME")
} else {
userEnv = os.Getenv("USER")
}
username, err := CurrentUser()
if userEnv != "" && err != nil {
t.Fatalf("If $USER is not blank, error should not happen. Error: %v", err)
}
if userEnv != username {
t.Errorf("Fetched the wrong username. $USER: %v, username: %v", userEnv, username)
}
}
| [
"\"USERNAME\"",
"\"USER\""
]
| []
| [
"USER",
"USERNAME"
]
| [] | ["USER", "USERNAME"] | go | 2 | 0 | |
models.py | import os
import enum
from typing import Counter
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql.expression import false, null
from sqlalchemy.sql.schema import ForeignKey, PrimaryKeyConstraint, Table, MetaData
from sqlalchemy.sql.sqltypes import Boolean, Float
from config import init_env_vars
Base = declarative_base()
init_env_vars()
### UNCOMMENT these below vars to enable for local
# database_name = os.getenv('DB_NAME')
# database_username = os.getenv('DB_USER')
# database_password = os.getenv('DB_PASSWORD')
# database_path = "postgresql://{}:{}@{}/{}"\
# .format(database_username, database_password, 'localhost:5432', database_name)
### HEROKU REQUIREMENTS
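# Heroku supplies DATABASE_URL with a postgres:// scheme, which newer SQLAlchemy
# versions no longer accept, so rewrite it to postgresql:// before connecting.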
database_path = os.environ.get('DATABASE_URL').replace("://", "ql://", 1)
db = SQLAlchemy()
'''
setup_db(app)
binds a flask application and a SQLAlchemy service
'''
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
Migrate(app, db)
def session_revert():
db.session.rollback()
def session_close():
db.session.close()
'''
Schema Configuration
'''
class Reservation (db.Model):
__tablename__ = 'reservation'
id = Column(Integer, primary_key=True)
vehicle_id = Column(Integer, ForeignKey('vehicle.id'), nullable=False)
customer_id = Column(Integer, ForeignKey('customer.id'), nullable=False)
employee_id = Column(Integer, ForeignKey('employee.id'), nullable=False)
# implemented the time attrib, if time allows
# start_time =
# end_time =
cost = Column(Float, nullable=False)
reservation_open = Column(Boolean, nullable=False)
    vehicle = relationship('Vehicle', uselist=False, foreign_keys=[vehicle_id])
    customer = relationship('Customer', uselist=False, foreign_keys=[customer_id])
    employee = relationship('Employee', uselist=False, foreign_keys=[employee_id])
def __init__(self, vehicle_id, customer_id,
employee_id, cost, reservation_open):
self.vehicle_id = vehicle_id
self.customer_id = customer_id
self.employee_id = employee_id
self.cost = cost
self.reservation_open = reservation_open
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def get_cust_info(id):
return Customer.query.filter_by(id=id).first()
def get_emp_info(id):
return Employee.query.filter_by(id=id).first()
def get_veh_info(id):
return Vehicle.query.filter_by(id=id).first()
def format(self):
customer = Reservation.get_cust_info(self.customer_id)
employee = Reservation.get_emp_info(self.employee_id)
vehicle = Reservation.get_veh_info(self.vehicle_id)
return {
'id' : self.id,
'cost': self.cost,
'customer_name': customer.first_name + ' ' + customer.last_name,
'employee_name': employee.first_name + ' ' + employee.last_name,
'vehicle_id': self.vehicle_id,
'vehicle_make_and_model': vehicle.make + ' ' + vehicle.model,
'reservation_open' : self.reservation_open
}
class Vehicle(db.Model):
    __tablename__ = 'vehicle'
id = Column(Integer, primary_key=True)
make = Column(String, nullable=False)
model = Column(String, nullable=False)
year = Column(Integer, nullable=False)
body_style = Column(String)
color = Column(String)
currently_rented = Column(Boolean, nullable=False)
reservations = relationship('Reservation', back_populates='vehicle')
def __init__(self, make, model, year, body_style, color,
currently_rented):
self.make = make
self.model = model
self.year = year
self.body_style = body_style
self.color = color
self.currently_rented = currently_rented
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'make': self.make,
'model': self.model,
'year': self.year,
'body_style': self.body_style,
'color': self.color,
'currently_rented': self.currently_rented,
}
class Person(db.Model):
# __tablename__= 'person'
__abstract__ = True
# id = Column(Integer, primary_key=True)
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
address = Column(String, nullable=False)
type = Column(String(50))
__mapper_args__ = {
'polymorphic_on':type,
'polymorphic_identity':'person',
}
class Customer(Person):
__tablename__ = 'customer'
id = Column(Integer, primary_key=True)
reservations = relationship('Reservation', back_populates='customer')
__mapper_args__ = {
'polymorphic_identity':'customer'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
}
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, primary_key=True)
employees = relationship('Employee', back_populates='manager')
__mapper_args__ = {
'polymorphic_identity':'manager'
}
def __init__(self, first_name, last_name, address, type):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type
}
class Employee(Person, db.Model):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
manager_id = Column(Integer, ForeignKey('manager.id'))
manager = relationship('Manager', back_populates='employees')
reservations = relationship('Reservation', back_populates='employee')
__mapper_args__ = {
'polymorphic_identity':'employee'
}
def __init__(self, first_name, last_name, address, type, manager_id):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.type = type
self.manager_id = manager_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id' : self.id,
'first_name' : self.first_name,
'last_name' : self.last_name,
'address' : self.address,
'type' : self.type,
'manager_id' : self.manager_id
}
'''
Helper functions
'''
def get_vehicle(id):
if id <= 0:
return Vehicle.query.all()
else:
return Vehicle.query.filter_by(id=id).first()
def get_customer(id):
if not id:
return Customer.query.all()
else:
return Customer.query.filter_by(id=id).first()
def get_employee(id):
if not id:
return Employee.query.all()
else:
return Employee.query.filter_by(id=id).first()
def get_manager(id):
if not id:
return Manager.query.all()
else:
return Manager.query.filter_by(id=id).first()
def get_reservation():
return Reservation.query.all() | []
| []
| [
"DATABASE_URL",
"DB_PASSWORD",
"DB_USER",
"DB_NAME"
]
| [] | ["DATABASE_URL", "DB_PASSWORD", "DB_USER", "DB_NAME"] | python | 4 | 0 | |
dummy/image-processing-cloud/main.go | package main
import (
// "compress/gzip"
"bufio"
"bytes"
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"os"
"strconv"
"time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var counter = 0
var N = os.Getenv("RATE_IN_IMAGES") // Sendrate in images e.g. 10 images reveiced before training is started
var maxEpochs = os.Getenv("EPOCHS") // Epochs
var trainingStatus = false
var db *mongo.Database
var imageEdgeTrainEndpoint string = fmt.Sprintf("http://%s:%s/model", os.Getenv("IMAGE_EDGE_IP"), os.Getenv("IMAGE_EDGE_PORT"))
type Request struct {
Img string `json:"img"`
UUID string `json:"uuid"`
}
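// Model carries the trained network's hidden and output weight matrices in
// binary-marshaled form, as exchanged with the edge service.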
type Model struct {
	Hweights []byte `json:"hweights"`
	Oweights []byte `json:"oweights"`
}
func sendModel(net *Network) {
log.Println("Start sending Model")
hWeights, err := net.hiddenWeights.MarshalBinary()
if err != nil {
log.Println("Error sending model.")
}
oWeights, err := net.outputWeights.MarshalBinary()
if err != nil {
log.Println("Error sending model.")
}
model := &Model{Hweights: hWeights, Oweights: oWeights}
data, err := json.Marshal(model)
if err != nil {
return
}
req, err := http.NewRequest("POST", imageEdgeTrainEndpoint, bytes.NewReader(data))
if err != nil {
return
}
log.Printf("send,%s", strconv.FormatInt(time.Now().UnixNano(), 10))
_, err = (&http.Client{}).Do(req)
if err != nil {
log.Print(err)
}
log.Println("Sended Model")
}
func trainData(d Request) {
NInt, _ := strconv.Atoi(N)
maxEpochsInt, _ := strconv.Atoi(maxEpochs)
if counter >= NInt && !trainingStatus {
trainingStatus = true
log.Println("Taining starts")
// source: https://github.com/sausheong/gonn
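		// 784 inputs (28x28 MNIST pixels), 200 hidden nodes, 10 output classes, learning rate 0.1.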
net := CreateNetwork(784, 200, 10, 0.1)
rand.Seed(time.Now().UTC().UnixNano())
t1 := time.Now()
for epochs := 0; epochs < maxEpochsInt; epochs++ { // epochs < 5
testFile, _ := os.Open("mnist_train.csv")
r := csv.NewReader(bufio.NewReader(testFile))
for {
record, err := r.Read()
if err == io.EOF {
break
}
inputs := make([]float64, net.inputs)
for i := range inputs {
x, _ := strconv.ParseFloat(record[i], 64)
inputs[i] = (x / 255.0 * 0.999) + 0.001
}
targets := make([]float64, 10)
for i := range targets {
targets[i] = 0.001
}
x, _ := strconv.Atoi(record[0])
targets[x] = 0.999
net.Train(inputs, targets)
}
testFile.Close()
}
elapsed := time.Since(t1)
log.Printf("\nTime taken to train: %s\n", elapsed)
trainingStatus = false
go sendModel(&net)
} else {
counter += 1
}
}
func savePlant(d Request, path string) {
collection := db.Collection(path)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
res, err := collection.InsertOne(ctx, d)
if err != nil {
log.Println(err)
}
log.Println(res)
}
func SickHandler(w http.ResponseWriter, r *http.Request) {
timestamp := strconv.FormatInt(time.Now().UnixNano(), 10)
var data Request
err := json.NewDecoder(r.Body).Decode(&data)
if err != nil {
log.Print(err)
return
}
log.Printf("recv,image,%s,%s", data.UUID, timestamp)
go savePlant(data, "sick")
}
func TrainHandler(w http.ResponseWriter, r *http.Request) {
timestamp := strconv.FormatInt(time.Now().UnixNano(), 10)
var data Request
err := json.NewDecoder(r.Body).Decode(&data)
if err != nil {
log.Print(err)
return
}
log.Printf("recv,image,%s,%s", data.UUID, timestamp)
go savePlant(data, "all")
go trainData(data)
}
func main() {
uri := "mongodb://" + os.Getenv("MONGODB_IP") + ":" + os.Getenv("MONGODB_PORT")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri))
if err != nil {
panic(err)
}
defer func() {
if err = client.Disconnect(ctx); err != nil {
panic(err)
}
}()
db = client.Database("plants")
http.HandleFunc("/sick", SickHandler)
http.HandleFunc("/train", TrainHandler)
http.ListenAndServe(":"+os.Getenv("IMAGE_CLOUD_PORT"), nil)
}
| [
"\"RATE_IN_IMAGES\"",
"\"EPOCHS\"",
"\"IMAGE_EDGE_IP\"",
"\"IMAGE_EDGE_PORT\"",
"\"MONGODB_IP\"",
"\"MONGODB_PORT\"",
"\"IMAGE_CLOUD_PORT\""
]
| []
| [
"MONGODB_PORT",
"IMAGE_EDGE_IP",
"IMAGE_CLOUD_PORT",
"IMAGE_EDGE_PORT",
"RATE_IN_IMAGES",
"MONGODB_IP",
"EPOCHS"
]
| [] | ["MONGODB_PORT", "IMAGE_EDGE_IP", "IMAGE_CLOUD_PORT", "IMAGE_EDGE_PORT", "RATE_IN_IMAGES", "MONGODB_IP", "EPOCHS"] | go | 7 | 0 | |
example/trace/tracing.go | package trace
import (
"context"
"net"
"os"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/jaeger"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
"go.uber.org/zap"
)
func newResource(name string) *resource.Resource {
return resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String(name),
semconv.ServiceInstanceIDKey.String(GetLocalIP()),
semconv.ServiceVersionKey.String("0.0.1"),
)
}
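// InstallExportPipeline wires up a Jaeger exporter from $JAEGER_TRACE_URL and
// returns a shutdown function the caller should defer so buffered spans are flushed.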
func InstallExportPipeline(ctx context.Context, name string) func() {
url := os.Getenv("JAEGER_TRACE_URL")
if url == "" {
zap.S().Warn("not tracing; set $JAEGER_TRACE_URL")
return func() {}
}
exporter, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url)))
if err != nil {
zap.S().Fatalf("creating OTLP trace exporter: %v", err)
}
tracerProvider := sdktrace.NewTracerProvider(
sdktrace.WithBatcher(exporter),
sdktrace.WithResource(newResource(name)),
)
otel.SetTracerProvider(tracerProvider)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
return func() {
if err := tracerProvider.Shutdown(ctx); err != nil {
zap.S().Fatalf("stopping tracer provider: %v", err)
}
}
}
// GetLocalIP returns the non loopback local IP of the host
func GetLocalIP() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type and if it is not a loopback the display it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
return ""
}
| [
"\"JAEGER_TRACE_URL\""
]
| []
| [
"JAEGER_TRACE_URL"
]
| [] | ["JAEGER_TRACE_URL"] | go | 1 | 0 | |
src/pkg/reg/adapter/harbor/base/adapter.go | // Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"fmt"
"net/http"
"os"
"strconv"
"strings"
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/http/modifier"
common_http_auth "github.com/goharbor/harbor/src/common/http/modifier/auth"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/reg/adapter/native"
"github.com/goharbor/harbor/src/pkg/reg/model"
"github.com/goharbor/harbor/src/pkg/reg/util"
"github.com/goharbor/harbor/src/pkg/registry/auth/basic"
)
// New creates an instance of the base adapter
func New(registry *model.Registry) (*Adapter, error) {
if isLocalHarbor(registry.URL) {
authorizer := common_http_auth.NewSecretAuthorizer(registry.Credential.AccessSecret)
httpClient := common_http.NewClient(&http.Client{
// when it's a local Harbor instance, the code runs inside the same process with
// core, so insecure transport is ok
// If using the secure one, as we'll replace the URL with 127.0.0.1 and this will
// cause error "x509: cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs"
Transport: common_http.GetHTTPTransport(common_http.WithInsecure(true)),
}, authorizer)
client, err := NewClient(registry.URL, httpClient)
if err != nil {
return nil, err
}
return &Adapter{
Adapter: native.NewAdapterWithAuthorizer(registry, authorizer),
Registry: registry,
Client: client,
url: registry.URL,
httpClient: httpClient,
}, nil
}
var authorizers []modifier.Modifier
if registry.Credential != nil {
authorizers = append(authorizers, basic.NewAuthorizer(
registry.Credential.AccessKey,
registry.Credential.AccessSecret))
}
httpClient := common_http.NewClient(&http.Client{
Transport: common_http.GetHTTPTransport(common_http.WithInsecure(registry.Insecure)),
}, authorizers...)
client, err := NewClient(registry.URL, httpClient)
if err != nil {
return nil, err
}
return &Adapter{
Adapter: native.NewAdapter(registry),
Registry: registry,
Client: client,
url: registry.URL,
httpClient: httpClient,
}, nil
}
// Adapter is the base adapter for Harbor
type Adapter struct {
*native.Adapter
Registry *model.Registry
Client *Client
// url and httpClient can be removed if we don't support replicate chartmuseum charts anymore
url string
httpClient *common_http.Client
}
// GetAPIVersion returns the supported API version of the Harbor instance that the adapter is created for
func (a *Adapter) GetAPIVersion() string {
return a.Client.APIVersion
}
// Info provides the information of the Harbor registry instance
func (a *Adapter) Info() (*model.RegistryInfo, error) {
info := &model.RegistryInfo{
Type: model.RegistryTypeHarbor,
SupportedResourceTypes: []string{
model.ResourceTypeImage,
},
SupportedResourceFilters: []*model.FilterStyle{
{
Type: model.FilterTypeName,
Style: model.FilterStyleTypeText,
},
{
Type: model.FilterTypeTag,
Style: model.FilterStyleTypeText,
},
},
SupportedTriggers: []string{
model.TriggerTypeManual,
model.TriggerTypeScheduled,
},
SupportedRepositoryPathComponentType: model.RepositoryPathComponentTypeAtLeastTwo,
}
enabled, err := a.Client.ChartRegistryEnabled()
if err != nil {
return nil, err
}
if enabled {
info.SupportedResourceTypes = append(info.SupportedResourceTypes, model.ResourceTypeChart)
}
labels, err := a.Client.ListLabels()
if err != nil {
return nil, err
}
info.SupportedResourceFilters = append(info.SupportedResourceFilters,
&model.FilterStyle{
Type: model.FilterTypeLabel,
Style: model.FilterStyleTypeList,
Values: labels,
})
return info, nil
}
// PrepareForPush creates projects
func (a *Adapter) PrepareForPush(resources []*model.Resource) error {
projects := map[string]*Project{}
for _, resource := range resources {
if resource == nil {
return errors.New("the resource cannot be null")
}
if resource.Metadata == nil {
return errors.New("the metadata of resource cannot be null")
}
if resource.Metadata.Repository == nil {
return errors.New("the repository of resource cannot be null")
}
if len(resource.Metadata.Repository.Name) == 0 {
return errors.New("the name of the repository cannot be null")
}
paths := strings.Split(resource.Metadata.Repository.Name, "/")
projectName := paths[0]
// handle the public properties
metadata := abstractPublicMetadata(resource.Metadata.Repository.Metadata)
pro, exist := projects[projectName]
if exist {
metadata = mergeMetadata(pro.Metadata, metadata)
}
projects[projectName] = &Project{
Name: projectName,
Metadata: metadata,
}
}
var ps []string
for p := range projects {
ps = append(ps, p)
}
q := fmt.Sprintf("name={%s}", strings.Join(ps, " "))
// get exist projects
queryProjects, err := a.Client.ListProjectsWithQuery(q, false)
if err != nil {
return errors.Wrapf(err, "list projects with query %s", q)
}
existProjects := make(map[string]*Project)
for _, p := range queryProjects {
existProjects[p.Name] = p
}
var notExistProjects []*Project
for _, p := range projects {
_, exist := existProjects[p.Name]
if !exist {
notExistProjects = append(notExistProjects, p)
}
}
for _, project := range notExistProjects {
if err := a.Client.CreateProject(project.Name, project.Metadata); err != nil {
if httpErr, ok := err.(*common_http.Error); ok && httpErr.Code == http.StatusConflict {
log.Debugf("got 409 when trying to create project %s", project.Name)
continue
}
return err
}
log.Debugf("project %s created", project.Name)
}
return nil
}
// ListProjects lists projects
func (a *Adapter) ListProjects(filters []*model.Filter) ([]*Project, error) {
pattern := ""
for _, filter := range filters {
if filter.Type == model.FilterTypeName {
pattern = filter.Value.(string)
break
}
}
var projects []*Project
if len(pattern) > 0 {
substrings := strings.Split(pattern, "/")
projectPattern := substrings[0]
names, ok := util.IsSpecificPathComponent(projectPattern)
if ok {
for _, name := range names {
// trim white space in project name
name = strings.TrimSpace(name)
project, err := a.Client.GetProject(name)
if err != nil {
return nil, err
}
if project == nil {
continue
}
projects = append(projects, project)
}
}
}
if len(projects) > 0 {
var names []string
for _, project := range projects {
names = append(names, project.Name)
}
log.Debugf("parsed the projects %v from pattern %s", names, pattern)
return projects, nil
}
return a.Client.ListProjects("")
}
func abstractPublicMetadata(metadata map[string]interface{}) map[string]interface{} {
if metadata == nil {
return nil
}
public, exist := metadata["public"]
if !exist {
return nil
}
return map[string]interface{}{
"public": public,
}
}
// currently, mergeMetadata only handles the public metadata
func mergeMetadata(metadata1, metadata2 map[string]interface{}) map[string]interface{} {
public := parsePublic(metadata1) && parsePublic(metadata2)
return map[string]interface{}{
"public": strconv.FormatBool(public),
}
}
func parsePublic(metadata map[string]interface{}) bool {
if metadata == nil {
return false
}
pub, exist := metadata["public"]
if !exist {
return false
}
public, ok := pub.(bool)
if ok {
return public
}
pubstr, ok := pub.(string)
if ok {
public, err := strconv.ParseBool(pubstr)
if err != nil {
log.Errorf("failed to parse %s to bool: %v", pubstr, err)
return false
}
return public
}
return false
}
// Project model
type Project struct {
ID int64 `json:"project_id"`
Name string `json:"name"`
Metadata map[string]interface{} `json:"metadata"`
}
func isLocalHarbor(url string) bool {
return url == os.Getenv("CORE_URL")
}
// check whether the current process is running inside core
func isInCore() bool {
return len(os.Getenv("EXT_ENDPOINT")) > 0
}
| [
"\"CORE_URL\"",
"\"EXT_ENDPOINT\""
]
| []
| [
"EXT_ENDPOINT",
"CORE_URL"
]
| [] | ["EXT_ENDPOINT", "CORE_URL"] | go | 2 | 0 | |
tensorpack/utils/serialize.py | # -*- coding: utf-8 -*-
# File: serialize.py
import os
from .develop import create_dummy_func
__all__ = ['loads', 'dumps']
def dumps_msgpack(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object
"""
return msgpack.dumps(obj, use_bin_type=True)
def loads_msgpack(buf):
"""
Args:
buf: the output of `dumps`.
"""
return msgpack.loads(buf, raw=False)
def dumps_pyarrow(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object
"""
return pa.serialize(obj).to_buffer()
def loads_pyarrow(buf):
"""
Args:
buf: the output of `dumps`.
"""
return pa.deserialize(buf)
try:
# fixed in pyarrow 0.9: https://github.com/apache/arrow/pull/1223#issuecomment-359895666
import pyarrow as pa
except ImportError:
pa = None
dumps_pyarrow = create_dummy_func('dumps_pyarrow', ['pyarrow']) # noqa
loads_pyarrow = create_dummy_func('loads_pyarrow', ['pyarrow']) # noqa
try:
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
except ImportError:
assert pa is not None, "pyarrow is a dependency of tensorpack!"
loads_msgpack = create_dummy_func( # noqa
'loads_msgpack', ['msgpack', 'msgpack_numpy'])
dumps_msgpack = create_dummy_func( # noqa
'dumps_msgpack', ['msgpack', 'msgpack_numpy'])
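# pyarrow is the default serializer; set TENSORPACK_SERIALIZE=msgpack to use msgpack + msgpack_numpy instead.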
if os.environ.get('TENSORPACK_SERIALIZE', None) == 'msgpack':
loads = loads_msgpack
dumps = dumps_msgpack
else:
loads = loads_pyarrow
dumps = dumps_pyarrow
| []
| []
| [
"TENSORPACK_SERIALIZE"
]
| [] | ["TENSORPACK_SERIALIZE"] | python | 1 | 0 | |
3/wordvalue.py | import os
import urllib.request
# PREWORK
TMP = os.getenv("TMP", "/tmp")
S3 = 'https://bites-data.s3.us-east-2.amazonaws.com/'
DICT = 'dictionary.txt'
DICTIONARY = os.path.join(TMP, DICT)
urllib.request.urlretrieve(f'{S3}{DICT}', DICTIONARY)
scrabble_scores = [(1, "E A O I N R T L S U"), (2, "D G"), (3, "B C M P"),
(4, "F H V W Y"), (5, "K"), (8, "J X"), (10, "Q Z")]
LETTER_SCORES = {letter: score for score, letters in scrabble_scores
for letter in letters.split()}
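# e.g. LETTER_SCORES['E'] == 1, LETTER_SCORES['K'] == 5, LETTER_SCORES['Z'] == 10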
# start coding
def load_words():
"""Load the words dictionary (DICTIONARY constant) into a list and return it"""
with open(DICTIONARY, "rt") as f:
txt = f.read()
return txt.splitlines()
def calc_word_value(word):
"""Given a word calculate its value using the LETTER_SCORES dict"""
return sum(LETTER_SCORES.get(letter.upper(), 0) for letter in word)
def max_word_value(words):
"""Given a list of words calculate the word with the maximum value and return it"""
return max(words, key=calc_word_value)
| []
| []
| [
"TMP"
]
| [] | ["TMP"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# telelogo directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "telelogo"))
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/metrics/newrelic/agent.py | import os
import logging
import math
import psutil
try:
from ConfigParser import RawConfigParser, NoOptionError, NoSectionError
except ImportError:
from configparser import RawConfigParser, NoOptionError, NoSectionError
import mod_wsgi
from .platform import Client
from ..sampler import Sampler
from ..statistics import Metrics, Stats
_logger = logging.getLogger(__name__)
def configuration_settings(app_name=None, license_key=None,
config_file=None, environment=None):
if config_file is None:
config_file = os.environ.get('NEW_RELIC_CONFIG_FILE', None)
if config_file is not None:
config_object = RawConfigParser()
if config_file:
config_object.read([config_file])
if environment is None:
environment = os.environ.get('NEW_RELIC_ENVIRONMENT', None)
def _option(name, section='newrelic', type=None, **kwargs):
try:
getter = 'get%s' % (type or '')
return getattr(config_object, getter)(section, name)
except NoOptionError:
if 'default' in kwargs:
return kwargs['default']
else:
raise
def option(name, type=None, **kwargs):
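        # Look in the most specific section first: 'newrelic-platform:<environment>',
        # then 'newrelic-platform', then 'newrelic:<environment>', then 'newrelic'.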
sections = []
if environment is not None:
sections.append('newrelic-platform:%s' % environment)
sections.append('newrelic-platform')
if environment is not None:
sections.append('newrelic:%s' % environment)
sections.append('newrelic')
for section in sections:
try:
return _option(name, section, type)
except (NoOptionError, NoSectionError):
pass
if 'default' in kwargs:
return kwargs['default']
if app_name is None:
app_name = os.environ.get('NEW_RELIC_APP_NAME', None)
app_name = option('app_name', default=app_name)
if license_key is None:
license_key = os.environ.get('NEW_RELIC_LICENSE_KEY', None)
license_key = option('license_key', default=license_key)
else:
if app_name is None:
app_name = os.environ.get('NEW_RELIC_APP_NAME', None)
if license_key is None:
license_key = os.environ.get('NEW_RELIC_LICENSE_KEY', None)
if app_name is not None:
app_name = app_name.split(';')[0].strip()
return app_name, license_key
class Agent(object):
guid = 'au.com.dscpl.wsgi.mod_wsgi'
version = '1.1.0'
max_retries = 10
def __init__(self, sampler=None, app_name=None, license_key=None,
config_file=None, environment=None):
self.sampler = None
if mod_wsgi.version < (4, 2, 0):
_logger.fatal('Version 4.2.0 or newer of mod_wsgi is required '
'for running the New Relic platform plugin. The plugin '
'has been disabled.')
return
app_name, license_key = configuration_settings(app_name,
license_key, config_file, environment)
if not license_key or not app_name:
_logger.fatal('Either the license key or application name was '
'not specified for the New Relic platform plugin. The '
'plugin has been disabled.')
return
_logger.info('New Relic platform plugin reporting to %r.', app_name)
self.client = Client(license_key)
self.license_key = license_key
self.app_name = app_name
self.sampler = sampler or Sampler()
self.sampler.register(self.process)
self.metrics = Metrics()
self.epoch = None
self.retries = 0
def upload(self, metrics, duration):
try:
self.client.send_metrics(self.app_name, self.guid, self.version,
duration, metrics)
except self.client.RetryDataForRequest:
return True
except Exception:
pass
return False
def record(self, name, value):
name = 'Component/' + name
self.metrics.merge_value(name, value)
def rollover(self):
self.metrics = Metrics()
self.epoch = None
self.retries = 0
def process(self, scoreboard):
# Record metric to track how many Apache server instances are
# reporting. The 'Server/Instances' metric should be charted as
# a 'Count', rounded to an 'Integer'.
self.record('Server/Instances[|servers]', 0)
# If this is the first sampling period, take that to mean that
# this is a new process and Apache was just (re)started. If we
# are being told the sampler is exiting, we take it that Apache
# is being shut down. Both can show up if Apache is shut down
# during the first sampling period. The 'Server/Lifecycle' metrics
# should be charted as a 'Count', rounded to an 'Integer'.
if scoreboard.sample_periods == 1:
self.record('Server/Lifecycle/Starting[|servers]', 0)
if scoreboard.sampler_exiting:
self.record('Server/Lifecycle/Stopping[|servers]', 0)
# Record metric to track how many processes are in use. This is
# calculated as an average from the total number which were
# reported in use in each individual sample. The
# 'Processes/Instances' metric should be charted as a 'Count',
# rounded to an 'Integer'.
self.record('Processes/Instances[|processes]', Stats(
count=scoreboard.processes_running))
# Also separately record how many processes were counted as
# having been started or stopped in the sampling period. These
# would be used to represent the amount of process churn which
# is occurring due to Apache's dynamic management of the number
# of processes. The 'Processes/Lifecycle' metrics should be
# charted as a 'Count', rounded to an 'Integer'.
self.record('Processes/Lifecycle/Starting[|processes]',
Stats(count=scoreboard.processes_started_count))
self.record('Processes/Lifecycle/Stopping[|processes]',
Stats(count=scoreboard.processes_stopped_count))
# Record metric to track how many workers are in idle and busy
# states. This is calculated as an average from the total number
# which were reported in each state in each individual sample.
# The 'Workers/Availability' metrics should be charted as a
# 'Count', rounded to an 'Integer'.
self.record('Workers/Availability/Idle[|workers]', Stats(
count=scoreboard.workers_idle))
self.record('Workers/Availability/Busy[|workers]', Stats(
count=scoreboard.workers_busy))
# Record metric to track more fine grained status of each
# worker. This is calculated as an average from the total number
# which were reported in each state in each individual sample.
# The 'Workers/Status' metrics should be charted as 'Average'
# value, rounded to an 'Integer'.
for label, value in scoreboard.workers_status.items():
self.record('Workers/Status/%s[workers]' % label, value)
# Record metric to track the utilisation of the server. The
# 'Workers/Utilization' metric should be charted as 'Average
# value', with number format of 'Percentage'.
self.record('Workers/Utilization[server]',
scoreboard.workers_utilization)
# Record metric to track the request throughput. The
# 'Requests/Throughput' metric should be charted as 'Throughput'.
self.record('Requests/Throughput[|requests]', Stats(
count=scoreboard.access_count_delta,
total=scoreboard.access_count_delta))
# Record metric to track number of bytes served up. This is
# believed only to be from response content. There is no known
# separate measure for bytes uploaded. The 'Requests/Bytes Served'
# metric should be charted as 'Rate'.
self.record('Requests/Bytes Served[bytes]',
scoreboard.bytes_served_delta)
# Record metric to track request response time. This is
# calculated as an average from the request samples. That is, it
# is not across all requests. The 'Requests/Response Time'
# metric should be charted as 'Average'.
for request in scoreboard.request_samples:
self.record('Requests/Response Time[seconds|request]',
request.duration)
# Record metric to track the percentile breakdown of request
# response time. This is calculated from the request samples only,
# not across all requests. The 'Requests/Percentiles' metric
# should be charted as 'Average'.
for label, value in scoreboard.request_percentiles.items():
self.record('Requests/Percentiles/%s[seconds]' % label, value)
# Record metric to track what percentage of all requests were
# captured as samples. The 'Requests/Sample Quality' metric
# should be charted as 'Average' converted to a 'Percentage'.
self.record('Requests/Sample Quality[requests]',
scoreboard.request_samples_quality)
user_time = 0.0
system_time = 0.0
memory_rss = 0
for process in scoreboard.processes_system_info.values():
user_time += process['cpu_user_time']
system_time += process['cpu_system_time']
memory_rss += process['memory_rss']
# Record metric to track memory usage by processes. The
# 'Processes/Memory/Physical' metric should be charted as
# 'Average'.
self.record('Processes/Memory/Physical[bytes]',
process['memory_rss'])
# Record metrics to track the number of context switches.
# The 'Processes/Context Switches' metrics should be charted
# as 'Rate'.
self.record('Processes/Context Switches/Voluntary[context]',
process['ctx_switch_voluntary'])
self.record('Processes/Context Switches/Involuntary[context]',
process['ctx_switch_involuntary'])
# Record metric to track combined memory usage of whole server.
# The 'Server/Memory/Physical' metric should be charted as
# 'Average'.
self.record('Server/Memory/Physical[bytes]', memory_rss)
# Record metric to track the CPU usage for user and system. The
# 'Processes/CPU Usage' metric should be charted as 'Rate'.
self.record('Processes/CPU Usage[cpu]', user_time + system_time)
self.record('Processes/CPU Usage/User[cpu]', user_time)
self.record('Processes/CPU Usage/System[cpu]', system_time)
# Now attempt to upload the metric data to New Relic. Make sure
# we don't try and upload data from too short of a sampling
# period as it will be rejected anyway. Retain any which is too
# short so it is merged with subsequent sampling period.
if self.epoch is not None:
duration = scoreboard.period_end - self.epoch
else:
duration = scoreboard.duration
if duration > 1.0:
retry = self.upload(self.metrics.metrics, duration)
else:
retry = True
# If a failure occurred but the failure type was such that we
# could try again to upload the data, then retain the metrics
# for next time. If we have had too many failed attempts though,
# we give up.
if retry:
self.retries += 1
if self.retries == self.max_retries:
self.rollover()
elif self.epoch is None:
self.epoch = scoreboard.period_start
else:
self.rollover()
def start(self):
if self.sampler is not None:
self.sampler.start()
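# Minimal wiring sketch (an assumption about how the plugin is started,
# not shown in this file): construct the agent once per process and start
# its sampler. The constructor silently disables itself unless mod_wsgi
# >= 4.2.0 is available and both an app name and license key are resolved.
#
#   agent = Agent()
#   agent.start()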
| []
| []
| [
"NEW_RELIC_LICENSE_KEY",
"NEW_RELIC_CONFIG_FILE",
"NEW_RELIC_APP_NAME",
"NEW_RELIC_ENVIRONMENT"
]
| [] | ["NEW_RELIC_LICENSE_KEY", "NEW_RELIC_CONFIG_FILE", "NEW_RELIC_APP_NAME", "NEW_RELIC_ENVIRONMENT"] | python | 4 | 0 | |
Python/Django/projeto_curso_2/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projeto_curso_2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
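# Typical invocations once dependencies are installed (standard Django
# workflow, not specific to this project):
#
#   python manage.py migrate
#   python manage.py runserver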
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/pydex_app/config.py | """
Put all pyDEX app configs here
author: [email protected]
"""
import os
from zero_ex.contract_addresses import NetworkId
from utils.web3utils import to_base_unit_amount, NULL_ADDRESS
class PydexBaseConfig:
"""Base configuration class for pyDEX App"""
SECRET_KEY = os.environ.get("SECRET_KEY", "development secret key is not safe")
SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(
os.environ.get("PYDEX_DB_PATH") or "{}/pydex.db".format(os.getcwd()))
SQLALCHEMY_TRACK_MODIFICATIONS = False
TESTING = False
# PYDEX EXCHANGE PARAMS
PYDEX_NETWORK_ID = NetworkId.RINKEBY.value
PYDEX_ZX_FEE_RECIPIENT = NULL_ADDRESS
PYDEX_ZX_MAKER_FEE = to_base_unit_amount(0)
PYDEX_ZX_TAKER_FEE = to_base_unit_amount(0)
PYDEX_WHITELISTED_TOKENS = "*"
# GUI DEFAULT PARAMS
OB_DEFAULT_PAGE = 1
OB_DEFAULT_PER_PAGE = 20
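# Usage sketch (assumes a Flask app factory elsewhere in the project,
# which is not shown here):
#
#   app.config.from_object("pydex_app.config.PydexBaseConfig")
#
# SECRET_KEY and PYDEX_DB_PATH are read from the environment at import
# time, falling back to the development defaults above.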
| []
| []
| [
"PYDEX_DB_PATH",
"SECRET_KEY"
]
| [] | ["PYDEX_DB_PATH", "SECRET_KEY"] | python | 2 | 0 |